canns 0.13.1__py3-none-any.whl → 0.13.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
canns/data/__init__.py CHANGED
@@ -16,11 +16,13 @@ from .datasets import (
16
16
  get_data_dir,
17
17
  get_dataset_path,
18
18
  get_huggingface_upload_guide,
19
+ get_left_right_data_session,
20
+ get_left_right_npz,
19
21
  list_datasets,
20
22
  load,
21
23
  quick_setup,
22
24
  )
23
- from .loaders import load_grid_data, load_roi_data
25
+ from .loaders import load_grid_data, load_left_right_npz, load_roi_data
24
26
 
25
27
  __all__ = [
26
28
  # Dataset registry and management
@@ -31,6 +33,8 @@ __all__ = [
31
33
  "list_datasets",
32
34
  "download_dataset",
33
35
  "get_dataset_path",
36
+ "get_left_right_data_session",
37
+ "get_left_right_npz",
34
38
  "quick_setup",
35
39
  "get_huggingface_upload_guide",
36
40
  # Generic loading
@@ -38,4 +42,5 @@ __all__ = [
38
42
  # Specialized loaders
39
43
  "load_roi_data",
40
44
  "load_grid_data",
45
+ "load_left_right_npz",
41
46
  ]
canns/data/datasets.py CHANGED
@@ -38,6 +38,7 @@ DEFAULT_DATA_DIR = Path.home() / ".canns" / "data"
38
38
  # URLs for datasets on Hugging Face
39
39
  HUGGINGFACE_REPO = "canns-team/data-analysis-datasets"
40
40
  BASE_URL = f"https://huggingface.co/datasets/{HUGGINGFACE_REPO}/resolve/main/"
41
+ LEFT_RIGHT_DATASET_DIR = "Left_Right_data_of"
41
42
 
42
43
  # Dataset registry with metadata
43
44
  DATASETS = {
@@ -68,6 +69,16 @@ DATASETS = {
68
69
  "sha256": None,
69
70
  "url": f"{BASE_URL}grid_2.npz",
70
71
  },
72
+ "left_right_data_of": {
73
+ "filename": LEFT_RIGHT_DATASET_DIR,
74
+ "description": "ASA type data from Left-Right sweep paper",
75
+ "size_mb": 604.0,
76
+ "format": "directory",
77
+ "usage": "ASA analysis, left-right sweep sessions",
78
+ "sha256": None,
79
+ "url": f"{BASE_URL}{LEFT_RIGHT_DATASET_DIR}/",
80
+ "is_collection": True,
81
+ },
71
82
  }
72
83
 
73
84
 
@@ -130,7 +141,10 @@ def list_datasets() -> None:
130
141
  print("=" * 60)
131
142
 
132
143
  for key, info in DATASETS.items():
133
- status = "Available" if info["url"] else "Setup required"
144
+ if info.get("is_collection"):
145
+ status = "Collection (use session getter)"
146
+ else:
147
+ status = "Available" if info["url"] else "Setup required"
134
148
  print(f"\nDataset: {key}")
135
149
  print(f" File: {info['filename']}")
136
150
  print(f" Size: {info['size_mb']} MB")
@@ -162,6 +176,11 @@ def download_dataset(dataset_key: str, force: bool = False) -> Path | None:
162
176
 
163
177
  info = DATASETS[dataset_key]
164
178
 
179
+ if info.get("is_collection"):
180
+ print(f"{dataset_key} is a dataset collection.")
181
+ print("Use get_left_right_data_session(session_id) to download a session.")
182
+ return None
183
+
165
184
  if not info["url"]:
166
185
  print(f"{dataset_key} not yet available for download")
167
186
  print("Please use setup_local_datasets() to copy from local repository")
@@ -213,6 +232,10 @@ def get_dataset_path(dataset_key: str, auto_setup: bool = True) -> Path | None:
213
232
  if dataset_key not in DATASETS:
214
233
  print(f"Unknown dataset: {dataset_key}")
215
234
  return None
235
+ if DATASETS[dataset_key].get("is_collection"):
236
+ print(f"{dataset_key} is a dataset collection.")
237
+ print("Use get_left_right_data_session(session_id) to access session files.")
238
+ return None
216
239
 
217
240
  data_dir = get_data_dir()
218
241
  filepath = data_dir / DATASETS[dataset_key]["filename"]
@@ -236,6 +259,136 @@ def get_dataset_path(dataset_key: str, auto_setup: bool = True) -> Path | None:
236
259
  return None
237
260
 
238
261
 
262
+ def get_left_right_data_session(
263
+ session_id: str, auto_download: bool = True, force: bool = False
264
+ ) -> dict[str, Path | list[Path] | None] | None:
265
+ """
266
+ Download and return files for a Left_Right_data_of session.
267
+
268
+ Parameters
269
+ ----------
270
+ session_id : str
271
+ Session folder name, e.g. "24365_2".
272
+ auto_download : bool
273
+ Whether to download missing files automatically.
274
+ force : bool
275
+ Whether to force re-download of existing files.
276
+
277
+ Returns
278
+ -------
279
+ dict or None
280
+ Mapping with keys: "manifest", "full_file", "module_files".
281
+ """
282
+ if not session_id:
283
+ raise ValueError("session_id must be non-empty")
284
+
285
+ session_dir = get_data_dir() / LEFT_RIGHT_DATASET_DIR / session_id
286
+ session_dir.mkdir(parents=True, exist_ok=True)
287
+
288
+ manifest_filename = f"{session_id}_ASA_manifest.json"
289
+ manifest_url = f"{BASE_URL}{LEFT_RIGHT_DATASET_DIR}/{session_id}/{manifest_filename}"
290
+ manifest_path = session_dir / manifest_filename
291
+
292
+ if auto_download and (force or not manifest_path.exists()):
293
+ if not download_file_with_progress(manifest_url, manifest_path):
294
+ print(f"Failed to download manifest for session {session_id}")
295
+ return None
296
+
297
+ if not manifest_path.exists():
298
+ print(f"Manifest not found for session {session_id}")
299
+ return None
300
+
301
+ import json
302
+
303
+ with open(manifest_path) as f:
304
+ manifest = json.load(f)
305
+
306
+ full_file = manifest.get("full_file")
307
+ module_files = manifest.get("module_files", [])
308
+ requested_files: list[str] = []
309
+
310
+ if isinstance(full_file, str):
311
+ requested_files.append(Path(full_file).name)
312
+
313
+ if isinstance(module_files, list):
314
+ for module_file in module_files:
315
+ if isinstance(module_file, str):
316
+ requested_files.append(Path(module_file).name)
317
+
318
+ # De-duplicate while preserving order
319
+ seen: set[str] = set()
320
+ unique_files: list[str] = []
321
+ for filename in requested_files:
322
+ if filename and filename not in seen:
323
+ seen.add(filename)
324
+ unique_files.append(filename)
325
+
326
+ for filename in unique_files:
327
+ file_path = session_dir / filename
328
+ if auto_download and (force or not file_path.exists()):
329
+ file_url = f"{BASE_URL}{LEFT_RIGHT_DATASET_DIR}/{session_id}/{filename}"
330
+ if not download_file_with_progress(file_url, file_path):
331
+ print(f"Failed to download {filename} for session {session_id}")
332
+ return None
333
+
334
+ return {
335
+ "manifest": manifest_path,
336
+ "full_file": session_dir / Path(full_file).name if isinstance(full_file, str) else None,
337
+ "module_files": [
338
+ session_dir / Path(module_file).name
339
+ for module_file in module_files
340
+ if isinstance(module_file, str)
341
+ ],
342
+ }
343
+
344
+
345
+ def get_left_right_npz(
346
+ session_id: str, filename: str, auto_download: bool = True, force: bool = False
347
+ ) -> Path | None:
348
+ """
349
+ Download and return a specific Left_Right_data_of NPZ file.
350
+
351
+ Parameters
352
+ ----------
353
+ session_id : str
354
+ Session folder name, e.g. "26034_3".
355
+ filename : str
356
+ File name inside the session folder, e.g.
357
+ "26034_3_ASA_mec_gridModule02_n104_cm.npz".
358
+ auto_download : bool
359
+ Whether to download the file if missing.
360
+ force : bool
361
+ Whether to force re-download of existing files.
362
+
363
+ Returns
364
+ -------
365
+ Path or None
366
+ Path to the requested file if available, None otherwise.
367
+ """
368
+ if not session_id:
369
+ raise ValueError("session_id must be non-empty")
370
+ if not filename:
371
+ raise ValueError("filename must be non-empty")
372
+
373
+ safe_name = Path(filename).name
374
+ session_dir = get_data_dir() / LEFT_RIGHT_DATASET_DIR / session_id
375
+ session_dir.mkdir(parents=True, exist_ok=True)
376
+
377
+ file_path = session_dir / safe_name
378
+ if file_path.exists() and not force:
379
+ return file_path
380
+
381
+ if not auto_download:
382
+ return None
383
+
384
+ file_url = f"{BASE_URL}{LEFT_RIGHT_DATASET_DIR}/{session_id}/{safe_name}"
385
+ if not download_file_with_progress(file_url, file_path):
386
+ print(f"Failed to download {safe_name} for session {session_id}")
387
+ return None
388
+
389
+ return file_path
390
+
391
+
239
392
  def detect_file_type(filepath: Path) -> str:
240
393
  """Detect file type based on extension."""
241
394
  suffix = filepath.suffix.lower()
canns/data/loaders.py CHANGED
@@ -211,6 +211,43 @@ def load_grid_data(
211
211
  return None
212
212
 
213
213
 
214
+ def load_left_right_npz(
215
+ session_id: str, filename: str, auto_download: bool = True, force: bool = False
216
+ ) -> dict[str, Any] | None:
217
+ """
218
+ Load a Left_Right_data_of NPZ file.
219
+
220
+ Parameters
221
+ ----------
222
+ session_id : str
223
+ Session folder name, e.g. "26034_3".
224
+ filename : str
225
+ File name inside the session folder.
226
+ auto_download : bool
227
+ Whether to download the file if missing.
228
+ force : bool
229
+ Whether to force re-download of existing files.
230
+
231
+ Returns
232
+ -------
233
+ dict or None
234
+ Dictionary of npz arrays if successful, None otherwise.
235
+ """
236
+ try:
237
+ path = _datasets.get_left_right_npz(
238
+ session_id=session_id,
239
+ filename=filename,
240
+ auto_download=auto_download,
241
+ force=force,
242
+ )
243
+ if path is None:
244
+ return None
245
+ return dict(np.load(path, allow_pickle=True))
246
+ except Exception as e:
247
+ print(f"Failed to load Left-Right npz {session_id}/{filename}: {e}")
248
+ return None
249
+
250
+
214
251
  def validate_roi_data(data: np.ndarray) -> bool:
215
252
  """
216
253
  Validate ROI data format for 1D CANN analysis.
@@ -1,17 +1,9 @@
1
- """
2
- CANNs Pipeline Module
1
+ """CANNs pipeline entrypoints."""
3
2
 
4
- High-level pipelines for common analysis workflows, designed to make CANN models
5
- accessible to experimental researchers without requiring detailed knowledge of
6
- the underlying implementations.
7
- """
8
-
9
- from ._base import Pipeline
10
3
  from .asa import ASAApp
11
4
  from .asa import main as asa_main
5
+ from .gallery import GalleryApp
6
+ from .gallery import main as gallery_main
7
+ from .launcher import main as launcher_main
12
8
 
13
- __all__ = [
14
- "Pipeline",
15
- "ASAApp",
16
- "asa_main",
17
- ]
9
+ __all__ = ["ASAApp", "asa_main", "GalleryApp", "gallery_main", "launcher_main"]
@@ -0,0 +1,6 @@
1
+ """Run the canns TUI launcher as a module."""
2
+
3
+ from .launcher import main
4
+
5
+ if __name__ == "__main__":
6
+ main()
@@ -1,7 +1,17 @@
1
- """Gallery module for model demonstrations and examples.
1
+ """Model gallery TUI."""
2
2
 
3
- This module will contain interactive TUI demos for CANN models and other
4
- visualization examples in the future.
5
- """
3
+ import os
6
4
 
7
- __all__ = []
5
+ __all__ = ["GalleryApp", "main"]
6
+
7
+
8
+ def main() -> None:
9
+ """Entry point for the model gallery TUI."""
10
+ os.environ.setdefault("MPLBACKEND", "Agg")
11
+ from .app import GalleryApp
12
+
13
+ app = GalleryApp()
14
+ app.run()
15
+
16
+
17
+ from .app import GalleryApp
@@ -0,0 +1,11 @@
1
+ """Main entry point for running the gallery TUI as a module."""
2
+
3
+ import os
4
+
5
+ os.environ.setdefault("MPLBACKEND", "Agg")
6
+
7
+ from .app import GalleryApp
8
+
9
+ if __name__ == "__main__":
10
+ app = GalleryApp()
11
+ app.run()