vesper-wizard 2.0.7 → 2.0.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -222,6 +222,28 @@ export_dataset(
222
222
 
223
223
  ---
224
224
 
225
+ #### `vesper_download_assets`
226
+ Download image/media assets to a user-controlled local directory.
227
+
228
+ **Parameters:**
229
+ - `dataset_id` (string): Dataset identifier
230
+ - `source` (string): `huggingface`, `kaggle`, or `url`
231
+ - `target_dir` (string, optional): Exact local directory where assets should be written. The directory is created if it does not exist, and a leading `~` is expanded to the user's home directory.
232
+ - `output_dir` (string, optional): Alias for `target_dir`. If both are provided, `target_dir` takes precedence.
233
+ - `output_format` (string, optional): `webdataset`, `imagefolder`, or `parquet`
234
+
235
+ **Example:**
236
+ ```
237
+ vesper_download_assets(
238
+ dataset_id="cats_vs_dogs",
239
+ source="kaggle",
240
+ target_dir="./datasets/cats_dogs_100",
241
+ output_format="imagefolder"
242
+ )
243
+ ```
244
+
245
+ ---
246
+
225
247
  ### Quality Analysis
226
248
 
227
249
  #### `analyze_image_quality`
package/build/index.js CHANGED
@@ -960,6 +960,8 @@ server.setRequestHandler(ListToolsRequestSchema, async () => {
960
960
  kaggle_ref: { type: "string", description: "Kaggle dataset ref (owner/dataset)." },
961
961
  urls: { type: "array", items: { type: "string" }, description: "Direct asset URLs." },
962
962
  output_format: { type: "string", enum: ["webdataset", "imagefolder", "parquet"], description: "Output asset format." },
963
+ target_dir: { type: "string", description: "Optional local directory where downloaded assets should be written. If provided, Vesper writes directly to this directory instead of managed asset storage." },
964
+ output_dir: { type: "string", description: "Alias for target_dir. When provided, downloaded assets are written directly to this local directory." },
963
965
  max_items: { type: "number", description: "Optional cap on number of assets to fetch." },
964
966
  workers: { type: "number", description: "Parallel worker count (default 8)." },
965
967
  image_column: { type: "string", description: "Explicit image column name. If omitted, auto-detected from HF features, column names, and sample values." },
@@ -1521,6 +1523,11 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
1521
1523
  ? (request.params.arguments?.urls).map(v => String(v))
1522
1524
  : undefined;
1523
1525
  const outputFormat = String(request.params.arguments?.output_format || "webdataset");
1526
+ const requestedOutputDir = request.params.arguments?.target_dir
1527
+ ? String(request.params.arguments.target_dir).trim()
1528
+ : request.params.arguments?.output_dir
1529
+ ? String(request.params.arguments.output_dir).trim()
1530
+ : undefined;
1524
1531
  const maxItems = request.params.arguments?.max_items ? Number(request.params.arguments.max_items) : undefined;
1525
1532
  const workers = request.params.arguments?.workers ? Number(request.params.arguments.workers) : 8;
1526
1533
  const imageColumn = request.params.arguments?.image_column ? String(request.params.arguments.image_column) : undefined;
@@ -1563,6 +1570,7 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
1563
1570
  kaggle_ref: kaggleRef,
1564
1571
  urls,
1565
1572
  output_format: outputFormat,
1573
+ output_dir: requestedOutputDir,
1566
1574
  max_items: maxItems,
1567
1575
  workers,
1568
1576
  image_column: imageColumn,
@@ -26,6 +26,7 @@ def _print(payload: Dict[str, Any]) -> None:
26
26
  async def _run_download(args: argparse.Namespace) -> Dict[str, Any]:
27
27
  payload = json.loads(args.payload)
28
28
  output_root = payload.get("output_root") or str(Path.home() / ".vesper" / "data" / "assets")
29
+ output_dir = payload.get("output_dir")
29
30
  workers = int(payload.get("workers") or 8)
30
31
  recipes_dir = payload.get("recipes_dir")
31
32
 
@@ -43,6 +44,7 @@ async def _run_download(args: argparse.Namespace) -> Dict[str, Any]:
43
44
  kaggle_ref=payload.get("kaggle_ref"),
44
45
  urls=payload.get("urls"),
45
46
  output_format=payload.get("output_format", "webdataset"),
47
+ output_dir=str(output_dir) if output_dir else None,
46
48
  max_items=payload.get("max_items"),
47
49
  image_column=payload.get("image_column"),
48
50
  )
@@ -191,6 +191,7 @@ class AssetDownloader:
191
191
  kaggle_ref: Optional[str] = None,
192
192
  urls: Optional[List[str]] = None,
193
193
  output_format: str = "webdataset",
194
+ output_dir: Optional[str] = None,
194
195
  max_items: Optional[int] = None,
195
196
  image_column: Optional[str] = None,
196
197
  ) -> Dict[str, Any]:
@@ -231,7 +232,10 @@ class AssetDownloader:
231
232
  raise ValueError("urls are required for source=url")
232
233
 
233
234
  # --- Now safe to create directories ---
234
- dataset_dir = self.output_root / dataset_id.replace("/", "_").replace(":", "_")
235
+ if output_dir:
236
+ dataset_dir = Path(output_dir).expanduser().resolve()
237
+ else:
238
+ dataset_dir = self.output_root / dataset_id.replace("/", "_").replace(":", "_")
235
239
  images_dir = dataset_dir / "images"
236
240
  dataset_dir.mkdir(parents=True, exist_ok=True)
237
241
  images_dir.mkdir(parents=True, exist_ok=True)
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "vesper-wizard",
3
- "version": "2.0.7",
3
+ "version": "2.0.8",
4
4
  "description": "AI-powered dataset discovery, quality analysis, and preparation MCP server with multimodal support (text, image, audio, video)",
5
5
  "type": "module",
6
6
  "main": "build/index.js",
@@ -26,6 +26,7 @@ def _print(payload: Dict[str, Any]) -> None:
26
26
  async def _run_download(args: argparse.Namespace) -> Dict[str, Any]:
27
27
  payload = json.loads(args.payload)
28
28
  output_root = payload.get("output_root") or str(Path.home() / ".vesper" / "data" / "assets")
29
+ output_dir = payload.get("output_dir")
29
30
  workers = int(payload.get("workers") or 8)
30
31
  recipes_dir = payload.get("recipes_dir")
31
32
 
@@ -43,6 +44,7 @@ async def _run_download(args: argparse.Namespace) -> Dict[str, Any]:
43
44
  kaggle_ref=payload.get("kaggle_ref"),
44
45
  urls=payload.get("urls"),
45
46
  output_format=payload.get("output_format", "webdataset"),
47
+ output_dir=str(output_dir) if output_dir else None,
46
48
  max_items=payload.get("max_items"),
47
49
  image_column=payload.get("image_column"),
48
50
  )
@@ -191,6 +191,7 @@ class AssetDownloader:
191
191
  kaggle_ref: Optional[str] = None,
192
192
  urls: Optional[List[str]] = None,
193
193
  output_format: str = "webdataset",
194
+ output_dir: Optional[str] = None,
194
195
  max_items: Optional[int] = None,
195
196
  image_column: Optional[str] = None,
196
197
  ) -> Dict[str, Any]:
@@ -231,7 +232,10 @@ class AssetDownloader:
231
232
  raise ValueError("urls are required for source=url")
232
233
 
233
234
  # --- Now safe to create directories ---
234
- dataset_dir = self.output_root / dataset_id.replace("/", "_").replace(":", "_")
235
+ if output_dir:
236
+ dataset_dir = Path(output_dir).expanduser().resolve()
237
+ else:
238
+ dataset_dir = self.output_root / dataset_id.replace("/", "_").replace(":", "_")
235
239
  images_dir = dataset_dir / "images"
236
240
  dataset_dir.mkdir(parents=True, exist_ok=True)
237
241
  images_dir.mkdir(parents=True, exist_ok=True)