argus-cv 1.4.0__tar.gz → 1.5.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


Files changed (44)
  1. {argus_cv-1.4.0 → argus_cv-1.5.1}/CHANGELOG.md +16 -0
  2. {argus_cv-1.4.0 → argus_cv-1.5.1}/PKG-INFO +1 -1
  3. {argus_cv-1.4.0 → argus_cv-1.5.1}/docs/guides/datasets.md +20 -0
  4. argus_cv-1.5.1/docs/guides/filtering.md +101 -0
  5. {argus_cv-1.4.0 → argus_cv-1.5.1}/mkdocs.yml +1 -0
  6. {argus_cv-1.4.0 → argus_cv-1.5.1}/pyproject.toml +1 -1
  7. {argus_cv-1.4.0 → argus_cv-1.5.1}/src/argus/__init__.py +1 -1
  8. {argus_cv-1.4.0 → argus_cv-1.5.1}/src/argus/cli.py +345 -1
  9. argus_cv-1.5.1/src/argus/core/__init__.py +37 -0
  10. {argus_cv-1.4.0 → argus_cv-1.5.1}/src/argus/core/coco.py +46 -8
  11. argus_cv-1.5.1/src/argus/core/convert.py +277 -0
  12. argus_cv-1.5.1/src/argus/core/filter.py +670 -0
  13. {argus_cv-1.4.0 → argus_cv-1.5.1}/src/argus/core/yolo.py +29 -0
  14. {argus_cv-1.4.0 → argus_cv-1.5.1}/tests/conftest.py +86 -0
  15. argus_cv-1.5.1/tests/test_convert.py +541 -0
  16. argus_cv-1.5.1/tests/test_filter_command.py +659 -0
  17. {argus_cv-1.4.0 → argus_cv-1.5.1}/tests/test_list_command.py +73 -0
  18. argus_cv-1.4.0/src/argus/core/__init__.py +0 -17
  19. {argus_cv-1.4.0 → argus_cv-1.5.1}/.github/workflows/ci.yml +0 -0
  20. {argus_cv-1.4.0 → argus_cv-1.5.1}/.github/workflows/docs.yml +0 -0
  21. {argus_cv-1.4.0 → argus_cv-1.5.1}/.github/workflows/release.yml +0 -0
  22. {argus_cv-1.4.0 → argus_cv-1.5.1}/.gitignore +0 -0
  23. {argus_cv-1.4.0 → argus_cv-1.5.1}/.pre-commit-config.yaml +0 -0
  24. {argus_cv-1.4.0 → argus_cv-1.5.1}/README.md +0 -0
  25. {argus_cv-1.4.0 → argus_cv-1.5.1}/docs/assets/javascripts/extra.js +0 -0
  26. {argus_cv-1.4.0 → argus_cv-1.5.1}/docs/assets/stylesheets/extra.css +0 -0
  27. {argus_cv-1.4.0 → argus_cv-1.5.1}/docs/getting-started/installation.md +0 -0
  28. {argus_cv-1.4.0 → argus_cv-1.5.1}/docs/getting-started/quickstart.md +0 -0
  29. {argus_cv-1.4.0 → argus_cv-1.5.1}/docs/guides/listing.md +0 -0
  30. {argus_cv-1.4.0 → argus_cv-1.5.1}/docs/guides/splitting.md +0 -0
  31. {argus_cv-1.4.0 → argus_cv-1.5.1}/docs/guides/stats.md +0 -0
  32. {argus_cv-1.4.0 → argus_cv-1.5.1}/docs/guides/viewer.md +0 -0
  33. {argus_cv-1.4.0 → argus_cv-1.5.1}/docs/index.md +0 -0
  34. {argus_cv-1.4.0 → argus_cv-1.5.1}/docs/reference/cli.md +0 -0
  35. {argus_cv-1.4.0 → argus_cv-1.5.1}/src/argus/__main__.py +0 -0
  36. {argus_cv-1.4.0 → argus_cv-1.5.1}/src/argus/commands/__init__.py +0 -0
  37. {argus_cv-1.4.0 → argus_cv-1.5.1}/src/argus/core/base.py +0 -0
  38. {argus_cv-1.4.0 → argus_cv-1.5.1}/src/argus/core/mask.py +0 -0
  39. {argus_cv-1.4.0 → argus_cv-1.5.1}/src/argus/core/split.py +0 -0
  40. {argus_cv-1.4.0 → argus_cv-1.5.1}/tests/test_classification.py +0 -0
  41. {argus_cv-1.4.0 → argus_cv-1.5.1}/tests/test_mask.py +0 -0
  42. {argus_cv-1.4.0 → argus_cv-1.5.1}/tests/test_split_command.py +0 -0
  43. {argus_cv-1.4.0 → argus_cv-1.5.1}/tests/test_stats_command.py +0 -0
  44. {argus_cv-1.4.0 → argus_cv-1.5.1}/uv.lock +0 -0
{argus_cv-1.4.0 → argus_cv-1.5.1}/CHANGELOG.md

@@ -2,6 +2,22 @@
 
  <!-- version list -->
 
+ ## v1.5.1 (2026-01-28)
+
+ ### Bug Fixes
+
+ - Add missing documentation for filter command
+   ([`dc41fbb`](https://github.com/pirnerjonas/argus/commit/dc41fbb724faf2024ec9f1430e2af0a6af000d21))
+
+
+ ## v1.5.0 (2026-01-28)
+
+ ### Features
+
+ - Support Roboflow COCO format and improve YOLO classification detection
+   ([`902a59f`](https://github.com/pirnerjonas/argus/commit/902a59fbb506aa586d8677312d21ae240585b511))
+
+
  ## v1.4.0 (2026-01-26)
 
  ### Features
{argus_cv-1.4.0 → argus_cv-1.5.1}/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: argus-cv
- Version: 1.4.0
+ Version: 1.5.1
  Summary: CLI tool for working with vision AI datasets
  Requires-Python: >=3.10
  Requires-Dist: numpy>=1.24.0
{argus_cv-1.4.0 → argus_cv-1.5.1}/docs/guides/datasets.md

@@ -59,6 +59,26 @@ dataset/
  If your annotation filenames include `train`, `val`, or `test`, Argus will treat
  those as splits. Otherwise it defaults to `train`.
 
+ ### Roboflow COCO
+
+ Argus also supports the Roboflow variant of COCO format, where annotations live
+ inside split directories:
+
+ ```text
+ dataset/
+ ├── train/
+ │   ├── _annotations.coco.json
+ │   └── *.jpg
+ ├── valid/
+ │   ├── _annotations.coco.json
+ │   └── *.jpg
+ └── test/
+     ├── _annotations.coco.json
+     └── *.jpg
+ ```
+
+ Splits are detected from directory names (`train`, `valid`/`val`, `test`).
+
  ## Mask (semantic segmentation)
 
  Mask datasets are simple image + mask folders. Argus detects a few common
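The split detection described in the new Roboflow COCO section comes down to looking for `_annotations.coco.json` inside per-split subdirectories. As a rough, hypothetical illustration (this is not the package's `COCODataset` code, and the helper name is invented), that kind of detection could be sketched like this:

```python
from pathlib import Path

# Hypothetical sketch of Roboflow-style split detection; the helper name and
# alias mapping are assumptions, not taken from argus-cv's source.
SPLIT_ALIASES = {"train": "train", "val": "val", "valid": "val", "test": "test"}


def find_roboflow_splits(root: Path) -> dict[str, Path]:
    """Return {split_name: annotation_file} for a Roboflow-style COCO layout."""
    splits: dict[str, Path] = {}
    for child in root.iterdir():
        if not child.is_dir():
            continue
        ann = child / "_annotations.coco.json"
        # The directory name decides the split; "valid" is normalized to "val".
        if ann.exists() and child.name.lower() in SPLIT_ALIASES:
            splits[SPLIT_ALIASES[child.name.lower()]] = ann
    return splits
```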
argus_cv-1.5.1/docs/guides/filtering.md

@@ -0,0 +1,101 @@
+ # Filtering datasets
+
+ Use `argus-cv filter` to create a filtered copy of a dataset containing only specified classes.
+
+ ## Basic usage
+
+ ```bash
+ argus-cv filter -d /datasets/coco -o /datasets/coco_filtered --classes person,car
+ ```
+
+ This creates a new dataset with only the `person` and `car` classes. Class IDs are automatically remapped to sequential values (0, 1, 2, ...).
+
+ ## Filter to a single class
+
+ ```bash
+ argus-cv filter -d /datasets/yolo -o /datasets/yolo_balls --classes ball
+ ```
+
+ ## Exclude background images
+
+ By default, images without annotations (after filtering) are kept. Use `--no-background` to exclude them:
+
+ ```bash
+ argus-cv filter -d /datasets/coco -o /datasets/coco_filtered --classes dog --no-background
+ ```
+
+ This is useful when you want a dataset with only images that contain your target class.
+
+ ## Use symlinks for faster filtering
+
+ For large datasets, use `--symlinks` to create symbolic links instead of copying images:
+
+ ```bash
+ argus-cv filter -d /datasets/large -o /datasets/filtered --classes cat --symlinks
+ ```
+
+ This saves disk space and speeds up the filtering process significantly.
+
+ ## Supported formats
+
+ The filter command works with all dataset formats:
+
+ | Format | Supported | Notes |
+ |--------|-----------|-------|
+ | YOLO Detection | Yes | Labels remapped to new class IDs |
+ | YOLO Segmentation | Yes | Polygon annotations preserved |
+ | YOLO Classification | Yes | Only selected class directories copied |
+ | COCO | Yes | Annotations and category IDs remapped |
+ | Mask | Yes | Pixel values remapped to new class IDs |
+
+ ## Output layout
+
+ The output preserves the original dataset structure with train/val/test splits.
+
+ YOLO output:
+
+ ```text
+ output/
+ ├── data.yaml
+ ├── images/
+ │   ├── train/
+ │   ├── val/
+ │   └── test/
+ └── labels/
+     ├── train/
+     ├── val/
+     └── test/
+ ```
+
+ COCO output:
+
+ ```text
+ output/
+ ├── annotations/
+ │   ├── instances_train.json
+ │   ├── instances_val.json
+ │   └── instances_test.json
+ └── images/
+     ├── train/
+     ├── val/
+     └── test/
+ ```
+
+ ## Class ID remapping
+
+ When filtering, class IDs are remapped to start from 0 and be sequential. For example:
+
+ | Original | Filtered |
+ |----------|----------|
+ | 0: person | (removed) |
+ | 1: car | 0: car |
+ | 2: dog | 1: dog |
+ | 3: cat | (removed) |
+
+ If you filter to keep only `car` and `dog`, the new dataset will have `car` as class 0 and `dog` as class 1.
+
+ ## Common errors
+
+ - "No classes specified": You must provide at least one class name with `--classes`.
+ - "Classes not found in dataset": Check the class names match exactly (case-sensitive). Use `argus-cv stats` to see available classes.
+ - "Output directory already exists": The output directory must be empty or non-existent.
{argus_cv-1.4.0 → argus_cv-1.5.1}/mkdocs.yml

@@ -64,5 +64,6 @@ nav:
    - Stats and counts: guides/stats.md
    - Visual inspection: guides/viewer.md
    - Splitting datasets: guides/splitting.md
+   - Filtering datasets: guides/filtering.md
  - Reference:
    - CLI reference: reference/cli.md
{argus_cv-1.4.0 → argus_cv-1.5.1}/pyproject.toml

@@ -1,6 +1,6 @@
  [project]
  name = "argus-cv"
- version = "1.4.0"
+ version = "1.5.1"
  description = "CLI tool for working with vision AI datasets"
  readme = "README.md"
  requires-python = ">=3.10"
{argus_cv-1.4.0 → argus_cv-1.5.1}/src/argus/__init__.py

@@ -1,3 +1,3 @@
  """Argus - Vision AI dataset toolkit."""
 
- __version__ = "1.4.0"
+ __version__ = "1.5.1"
{argus_cv-1.4.0 → argus_cv-1.5.1}/src/argus/cli.py

@@ -8,11 +8,23 @@ import cv2
  import numpy as np
  import typer
  from rich.console import Console
- from rich.progress import Progress, SpinnerColumn, TextColumn
+ from rich.progress import (
+     BarColumn,
+     Progress,
+     SpinnerColumn,
+     TaskProgressColumn,
+     TextColumn,
+ )
  from rich.table import Table
 
  from argus.core import COCODataset, Dataset, MaskDataset, YOLODataset
  from argus.core.base import DatasetFormat, TaskType
+ from argus.core.convert import convert_mask_to_yolo_seg
+ from argus.core.filter import (
+     filter_coco_dataset,
+     filter_mask_dataset,
+     filter_yolo_dataset,
+ )
  from argus.core.split import (
      is_coco_unsplit,
      parse_ratio,
@@ -632,6 +644,338 @@ def split_dataset(
      )


+ @app.command(name="convert")
+ def convert_dataset(
+     input_path: Annotated[
+         Path,
+         typer.Option(
+             "--input-path",
+             "-i",
+             help="Path to the source dataset.",
+         ),
+     ] = Path("."),
+     output_path: Annotated[
+         Path,
+         typer.Option(
+             "--output-path",
+             "-o",
+             help="Output directory for converted dataset.",
+         ),
+     ] = Path("converted"),
+     to_format: Annotated[
+         str,
+         typer.Option(
+             "--to",
+             help="Target format (currently only 'yolo-seg' is supported).",
+         ),
+     ] = "yolo-seg",
+     epsilon_factor: Annotated[
+         float,
+         typer.Option(
+             "--epsilon-factor",
+             "-e",
+             help="Polygon simplification factor (Douglas-Peucker algorithm).",
+             min=0.0,
+             max=1.0,
+         ),
+     ] = 0.005,
+     min_area: Annotated[
+         float,
+         typer.Option(
+             "--min-area",
+             "-a",
+             help="Minimum contour area in pixels to include.",
+             min=0.0,
+         ),
+     ] = 100.0,
+ ) -> None:
+     """Convert a dataset from one format to another.
+
+     Currently supports converting MaskDataset to YOLO segmentation format.
+
+     Example:
+         uvx argus-cv convert -i /path/to/masks -o /path/to/output --to yolo-seg
+     """
+     # Validate format
+     if to_format != "yolo-seg":
+         console.print(
+             f"[red]Error: Unsupported target format '{to_format}'.[/red]\n"
+             "[yellow]Currently only 'yolo-seg' is supported.[/yellow]"
+         )
+         raise typer.Exit(1)
+
+     # Resolve and validate input path
+     input_path = input_path.resolve()
+     if not input_path.exists():
+         console.print(f"[red]Error: Path does not exist: {input_path}[/red]")
+         raise typer.Exit(1)
+     if not input_path.is_dir():
+         console.print(f"[red]Error: Path is not a directory: {input_path}[/red]")
+         raise typer.Exit(1)
+
+     # Detect source dataset - must be MaskDataset for yolo-seg conversion
+     dataset = MaskDataset.detect(input_path)
+     if not dataset:
+         console.print(
+             f"[red]Error: No MaskDataset found at {input_path}[/red]\n"
+             "[yellow]Ensure the path contains images/ + masks/ directories "
+             "(or equivalent patterns like img/+gt/ or leftImg8bit/+gtFine/).[/yellow]"
+         )
+         raise typer.Exit(1)
+
+     # Resolve output path
+     if not output_path.is_absolute():
+         output_path = input_path.parent / output_path
+     output_path = output_path.resolve()
+
+     # Check if output already exists
+     if output_path.exists() and any(output_path.iterdir()):
+         console.print(
+             f"[red]Error: Output directory already exists and is not empty: "
+             f"{output_path}[/red]"
+         )
+         raise typer.Exit(1)
+
+     # Show conversion info
+     console.print("[cyan]Converting MaskDataset to YOLO segmentation format[/cyan]")
+     console.print(f" Source: {input_path}")
+     console.print(f" Output: {output_path}")
+     console.print(f" Classes: {dataset.num_classes}")
+     splits_str = ", ".join(dataset.splits) if dataset.splits else "unsplit"
+     console.print(f" Splits: {splits_str}")
+     console.print()
+
+     # Run conversion with progress bar
+     with Progress(
+         SpinnerColumn(),
+         TextColumn("[progress.description]{task.description}"),
+         BarColumn(),
+         TaskProgressColumn(),
+         console=console,
+     ) as progress:
+         task = progress.add_task("Processing images...", total=None)
+
+         def update_progress(current: int, total: int) -> None:
+             progress.update(task, completed=current, total=total)
+
+         try:
+             stats = convert_mask_to_yolo_seg(
+                 dataset=dataset,
+                 output_path=output_path,
+                 epsilon_factor=epsilon_factor,
+                 min_area=min_area,
+                 progress_callback=update_progress,
+             )
+         except Exception as exc:
+             console.print(f"[red]Error during conversion: {exc}[/red]")
+             raise typer.Exit(1) from exc
+
+     # Show results
+     console.print()
+     console.print("[green]Conversion complete![/green]")
+     console.print(f" Images processed: {stats['images']}")
+     console.print(f" Labels created: {stats['labels']}")
+     console.print(f" Polygons extracted: {stats['polygons']}")
+
+     if stats["skipped"] > 0:
+         skipped = stats["skipped"]
+         console.print(f" [yellow]Skipped: {skipped} (no mask or empty)[/yellow]")
+     if stats["warnings"] > 0:
+         console.print(f" [yellow]Warnings: {stats['warnings']}[/yellow]")
+
+     console.print(f"\n[cyan]Output dataset: {output_path}[/cyan]")
+
+
+ @app.command(name="filter")
+ def filter_dataset(
+     dataset_path: Annotated[
+         Path,
+         typer.Option(
+             "--dataset-path",
+             "-d",
+             help="Path to the dataset root directory.",
+         ),
+     ] = Path("."),
+     output_path: Annotated[
+         Path,
+         typer.Option(
+             "--output",
+             "-o",
+             help="Output directory for filtered dataset.",
+         ),
+     ] = Path("filtered"),
+     classes: Annotated[
+         str,
+         typer.Option(
+             "--classes",
+             "-c",
+             help="Comma-separated list of class names to keep.",
+         ),
+     ] = "",
+     no_background: Annotated[
+         bool,
+         typer.Option(
+             "--no-background",
+             help="Exclude images with no annotations after filtering.",
+         ),
+     ] = False,
+     use_symlinks: Annotated[
+         bool,
+         typer.Option(
+             "--symlinks",
+             help="Use symlinks instead of copying images.",
+         ),
+     ] = False,
+ ) -> None:
+     """Filter a dataset by class names.
+
+     Creates a filtered copy of the dataset containing only the specified classes.
+     Class IDs are remapped to sequential values (0, 1, 2, ...).
+
+     Examples:
+         argus-cv filter -d dataset -o output --classes ball --no-background
+         argus-cv filter -d dataset -o output --classes ball,player
+         argus-cv filter -d dataset -o output --classes ball --symlinks
+     """
+     # Resolve path and validate
+     dataset_path = dataset_path.resolve()
+     if not dataset_path.exists():
+         console.print(f"[red]Error: Path does not exist: {dataset_path}[/red]")
+         raise typer.Exit(1)
+     if not dataset_path.is_dir():
+         console.print(f"[red]Error: Path is not a directory: {dataset_path}[/red]")
+         raise typer.Exit(1)
+
+     # Parse classes
+     if not classes:
+         console.print(
+             "[red]Error: No classes specified. "
+             "Use --classes to specify classes to keep.[/red]"
+         )
+         raise typer.Exit(1)
+
+     class_list = [c.strip() for c in classes.split(",") if c.strip()]
+     if not class_list:
+         console.print("[red]Error: No valid class names provided.[/red]")
+         raise typer.Exit(1)
+
+     # Detect dataset
+     dataset = _detect_dataset(dataset_path)
+     if not dataset:
+         console.print(
+             f"[red]Error: No dataset found at {dataset_path}[/red]\n"
+             "[yellow]Ensure the path points to a dataset root containing "
+             "data.yaml (YOLO), annotations/ folder (COCO), or "
+             "images/ + masks/ directories (Mask).[/yellow]"
+         )
+         raise typer.Exit(1)
+
+     # Validate classes exist in dataset
+     missing_classes = [c for c in class_list if c not in dataset.class_names]
+     if missing_classes:
+         available = ", ".join(dataset.class_names)
+         missing = ", ".join(missing_classes)
+         console.print(
+             f"[red]Error: Classes not found in dataset: {missing}[/red]\n"
+             f"[yellow]Available classes: {available}[/yellow]"
+         )
+         raise typer.Exit(1)
+
+     # Resolve output path
+     if not output_path.is_absolute():
+         output_path = dataset_path.parent / output_path
+     output_path = output_path.resolve()
+
+     # Check if output already exists
+     if output_path.exists() and any(output_path.iterdir()):
+         console.print(
+             f"[red]Error: Output directory already exists and is not empty: "
+             f"{output_path}[/red]"
+         )
+         raise typer.Exit(1)
+
+     # Show filter info
+     console.print(f"[cyan]Filtering {dataset.format.value.upper()} dataset[/cyan]")
+     console.print(f" Source: {dataset_path}")
+     console.print(f" Output: {output_path}")
+     console.print(f" Classes to keep: {', '.join(class_list)}")
+     console.print(f" Exclude background: {no_background}")
+     console.print(f" Use symlinks: {use_symlinks}")
+     console.print()
+
+     # Run filtering with progress bar
+     with Progress(
+         SpinnerColumn(),
+         TextColumn("[progress.description]{task.description}"),
+         BarColumn(),
+         TaskProgressColumn(),
+         console=console,
+     ) as progress:
+         task = progress.add_task("Filtering dataset...", total=None)
+
+         def update_progress(current: int, total: int) -> None:
+             progress.update(task, completed=current, total=total)
+
+         try:
+             if dataset.format == DatasetFormat.YOLO:
+                 assert isinstance(dataset, YOLODataset)
+                 stats = filter_yolo_dataset(
+                     dataset=dataset,
+                     output_path=output_path,
+                     classes=class_list,
+                     no_background=no_background,
+                     use_symlinks=use_symlinks,
+                     progress_callback=update_progress,
+                 )
+             elif dataset.format == DatasetFormat.COCO:
+                 assert isinstance(dataset, COCODataset)
+                 stats = filter_coco_dataset(
+                     dataset=dataset,
+                     output_path=output_path,
+                     classes=class_list,
+                     no_background=no_background,
+                     use_symlinks=use_symlinks,
+                     progress_callback=update_progress,
+                 )
+             elif dataset.format == DatasetFormat.MASK:
+                 assert isinstance(dataset, MaskDataset)
+                 stats = filter_mask_dataset(
+                     dataset=dataset,
+                     output_path=output_path,
+                     classes=class_list,
+                     no_background=no_background,
+                     use_symlinks=use_symlinks,
+                     progress_callback=update_progress,
+                 )
+             else:
+                 console.print(
+                     f"[red]Error: Unsupported dataset format: {dataset.format}[/red]"
+                 )
+                 raise typer.Exit(1)
+         except ValueError as exc:
+             console.print(f"[red]Error: {exc}[/red]")
+             raise typer.Exit(1) from exc
+         except Exception as exc:
+             console.print(f"[red]Error during filtering: {exc}[/red]")
+             raise typer.Exit(1) from exc
+
+     # Show results
+     console.print()
+     console.print("[green]Filtering complete![/green]")
+     console.print(f" Images: {stats.get('images', 0)}")
+     if "labels" in stats:
+         console.print(f" Labels: {stats['labels']}")
+     if "annotations" in stats:
+         console.print(f" Annotations: {stats['annotations']}")
+     if "masks" in stats:
+         console.print(f" Masks: {stats['masks']}")
+     if stats.get("skipped", 0) > 0:
+         skipped = stats["skipped"]
+         console.print(f" [yellow]Skipped: {skipped} (background images)[/yellow]")
+
+     console.print(f"\n[cyan]Output dataset: {output_path}[/cyan]")
+
+
  class _ImageViewer:
      """Interactive image viewer with zoom and pan support."""
 
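The `--epsilon-factor` and `--min-area` options of the new `convert` command describe a contour-to-polygon step. The actual implementation lives in `src/argus/core/convert.py`, which is not shown in this diff, so the following is only a plausible OpenCV sketch of that kind of step: extract contours per class value, drop small ones, and simplify with Douglas-Peucker, scaling epsilon by the contour perimeter.

```python
import cv2
import numpy as np


def mask_to_polygons_sketch(
    mask: np.ndarray,
    class_value: int,
    epsilon_factor: float = 0.005,
    min_area: float = 100.0,
) -> list[np.ndarray]:
    """Extract simplified polygons for one class value from an integer mask (sketch only)."""
    binary = (mask == class_value).astype(np.uint8)
    contours, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    polygons = []
    for contour in contours:
        if cv2.contourArea(contour) < min_area:
            continue  # mirrors --min-area: skip tiny specks
        # Douglas-Peucker simplification; epsilon scales with the contour perimeter,
        # which is one common way an --epsilon-factor style knob is applied.
        epsilon = epsilon_factor * cv2.arcLength(contour, closed=True)
        approx = cv2.approxPolyDP(contour, epsilon, closed=True)
        if len(approx) >= 3:  # a valid polygon needs at least three points
            polygons.append(approx.reshape(-1, 2))
    return polygons
```

A YOLO segmentation writer would then normalize these points by image width and height and emit one label line per polygon.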
argus_cv-1.5.1/src/argus/core/__init__.py

@@ -0,0 +1,37 @@
+ """Core dataset detection and handling."""
+
+ from argus.core.base import Dataset
+ from argus.core.coco import COCODataset
+ from argus.core.convert import (
+     ConversionParams,
+     Polygon,
+     convert_mask_to_yolo_labels,
+     convert_mask_to_yolo_seg,
+     mask_to_polygons,
+ )
+ from argus.core.filter import (
+     filter_coco_dataset,
+     filter_mask_dataset,
+     filter_yolo_dataset,
+ )
+ from argus.core.mask import ConfigurationError, MaskDataset
+ from argus.core.split import split_coco_dataset, split_yolo_dataset
+ from argus.core.yolo import YOLODataset
+
+ __all__ = [
+     "Dataset",
+     "YOLODataset",
+     "COCODataset",
+     "MaskDataset",
+     "ConfigurationError",
+     "split_coco_dataset",
+     "split_yolo_dataset",
+     "filter_yolo_dataset",
+     "filter_coco_dataset",
+     "filter_mask_dataset",
+     "ConversionParams",
+     "Polygon",
+     "mask_to_polygons",
+     "convert_mask_to_yolo_labels",
+     "convert_mask_to_yolo_seg",
+ ]
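The new `argus.core` package re-exports the filter helpers, so the CLI behavior is also reachable from Python. The sketch below is based only on the keyword arguments visible in the `cli.py` hunk above; the dataset path, class names, and the assumption that a no-op progress callback is acceptable are all hypothetical.

```python
from pathlib import Path

from argus.core import MaskDataset, filter_mask_dataset

# Detect a mask dataset the same way the CLI does (cli.py calls MaskDataset.detect).
dataset = MaskDataset.detect(Path("/datasets/roads"))  # path is an example
if not dataset:
    raise SystemExit("no mask dataset found")

# Keyword arguments mirror the call in the `filter` command; the stats dict keys
# ("images", "masks", "skipped") are the ones the CLI prints afterwards.
stats = filter_mask_dataset(
    dataset=dataset,
    output_path=Path("/datasets/roads_filtered"),
    classes=["road", "sidewalk"],  # example class names
    no_background=True,
    use_symlinks=False,
    progress_callback=lambda current, total: None,  # assumed to accept any callable
)
print(stats.get("images", 0), "images written")
```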