argus-cv 1.2.0__py3-none-any.whl → 1.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of argus-cv has been flagged as potentially problematic; review the release details below before installing or upgrading.

argus/__init__.py CHANGED
@@ -1,3 +1,3 @@
1
1
  """Argus - Vision AI dataset toolkit."""
2
2
 
3
- __version__ = "1.2.0"
3
+ __version__ = "1.3.0"
argus/cli.py CHANGED
@@ -11,8 +11,8 @@ from rich.console import Console
11
11
  from rich.progress import Progress, SpinnerColumn, TextColumn
12
12
  from rich.table import Table
13
13
 
14
- from argus.core import COCODataset, Dataset, YOLODataset
15
- from argus.core.base import TaskType
14
+ from argus.core import COCODataset, Dataset, MaskDataset, YOLODataset
15
+ from argus.core.base import DatasetFormat, TaskType
16
16
  from argus.core.split import (
17
17
  is_coco_unsplit,
18
18
  parse_ratio,
@@ -128,12 +128,19 @@ def stats(
128
128
  dataset = _detect_dataset(dataset_path)
129
129
  if not dataset:
130
130
  console.print(
131
- f"[red]Error: No YOLO or COCO dataset found at {dataset_path}[/red]\n"
131
+ f"[red]Error: No dataset found at {dataset_path}[/red]\n"
132
132
  "[yellow]Ensure the path points to a dataset root containing "
133
- "data.yaml (YOLO) or annotations/ folder (COCO).[/yellow]"
133
+ "data.yaml (YOLO), annotations/ folder (COCO), or "
134
+ "images/ + masks/ directories (Mask).[/yellow]"
134
135
  )
135
136
  raise typer.Exit(1)
136
137
 
138
+ # Handle mask datasets with pixel statistics
139
+ if dataset.format == DatasetFormat.MASK:
140
+ assert isinstance(dataset, MaskDataset)
141
+ _show_mask_stats(dataset, dataset_path)
142
+ return
143
+
137
144
  # Get instance counts with progress indicator
138
145
  with Progress(
139
146
  SpinnerColumn(),
@@ -212,10 +219,100 @@ def stats(
212
219
  else:
213
220
  image_parts.append(f"{split}: {img_total}")
214
221
 
215
- console.print(f"\n[green]Dataset: {dataset.format.value.upper()} | "
216
- f"Task: {dataset.task.value} | "
217
- f"Classes: {len(sorted_classes)} | "
218
- f"Total instances: {grand_total}[/green]")
222
+ console.print(
223
+ f"\n[green]Dataset: {dataset.format.value.upper()} | "
224
+ f"Task: {dataset.task.value} | "
225
+ f"Classes: {len(sorted_classes)} | "
226
+ f"Total instances: {grand_total}[/green]"
227
+ )
228
+
229
+ if image_parts:
230
+ console.print(f"[blue]Images: {' | '.join(image_parts)}[/blue]")
231
+
232
+
233
def _show_mask_stats(dataset: MaskDataset, dataset_path: Path) -> None:
    """Show statistics for mask datasets with pixel-level information.

    Args:
        dataset: The MaskDataset instance.
        dataset_path: Path to the dataset root.
    """
    # Gather pixel/image statistics behind a transient spinner.
    with Progress(
        SpinnerColumn(),
        TextColumn("[progress.description]{task.description}"),
        console=console,
        transient=True,
    ) as progress:
        progress.add_task("Analyzing mask dataset...", total=None)
        pixel_counts = dataset.get_pixel_counts()
        image_presence = dataset.get_image_class_presence()
        image_counts = dataset.get_image_counts()

    class_mapping = dataset.get_class_mapping()

    # Coverage percentages are computed against non-ignored pixels only.
    total_pixels = sum(
        count
        for class_id, count in pixel_counts.items()
        if class_id != dataset.ignore_index
    )
    ignored_pixels = pixel_counts.get(dataset.ignore_index, 0)
    total_images = sum(ic["total"] for ic in image_counts.values())

    splits_str = ", ".join(dataset.splits) if dataset.splits else "unsplit"
    table = Table(title=f"Class Statistics: {dataset_path.name} ({splits_str})")
    table.add_column("Class", style="cyan")
    table.add_column("Total Pixels", justify="right", style="green")
    table.add_column("% Coverage", justify="right", style="magenta")
    table.add_column("Images With", justify="right", style="yellow")

    # One row per class, ordered by numeric class ID.
    for class_id in sorted(class_mapping):
        pixels = pixel_counts.get(class_id, 0)
        coverage = (pixels / total_pixels * 100) if total_pixels > 0 else 0.0
        table.add_row(
            class_mapping[class_id],
            f"{pixels:,}",
            f"{coverage:.1f}%",
            str(image_presence.get(class_id, 0)),
        )

    # Ignored pixels get a dimmed footer row rather than a coverage share.
    if ignored_pixels > 0:
        table.add_section()
        table.add_row(
            "[dim](ignored)[/dim]",
            f"[dim]{ignored_pixels:,}[/dim]",
            "[dim]-[/dim]",
            f"[dim]{total_images}[/dim]",
        )

    console.print(table)

    # Summary lines.
    console.print(f"\n[green]Dataset: {dataset_path}[/green]")
    console.print(
        f"[green]Format: {dataset.format.value.upper()} | "
        f"Task: {dataset.task.value}[/green]"
    )

    # Per-split image counts.
    image_parts = [
        f"{split}: {image_counts[split]['total']}"
        for split in (dataset.splits if dataset.splits else ["unsplit"])
        if split in image_counts
    ]

    if image_parts:
        console.print(f"[blue]Images: {' | '.join(image_parts)}[/blue]")
@@ -247,6 +344,16 @@ def view(
247
344
  help="Maximum classes to show in grid (classification only).",
248
345
  ),
249
346
  ] = None,
347
+ opacity: Annotated[
348
+ float,
349
+ typer.Option(
350
+ "--opacity",
351
+ "-o",
352
+ help="Mask overlay opacity (0.0-1.0, mask datasets only).",
353
+ min=0.0,
354
+ max=1.0,
355
+ ),
356
+ ] = 0.5,
250
357
  ) -> None:
251
358
  """View annotated images in a dataset.
252
359
 
@@ -295,6 +402,41 @@ def view(
295
402
  # Generate consistent colors for each class
296
403
  class_colors = _generate_class_colors(dataset.class_names)
297
404
 
405
+ # Handle mask datasets with overlay viewer
406
+ if dataset.format == DatasetFormat.MASK:
407
+ assert isinstance(dataset, MaskDataset)
408
+ with Progress(
409
+ SpinnerColumn(),
410
+ TextColumn("[progress.description]{task.description}"),
411
+ console=console,
412
+ transient=True,
413
+ ) as progress:
414
+ progress.add_task("Loading images...", total=None)
415
+ image_paths = dataset.get_image_paths(split)
416
+
417
+ if not image_paths:
418
+ console.print("[yellow]No images found in the dataset.[/yellow]")
419
+ return
420
+
421
+ console.print(
422
+ f"[green]Found {len(image_paths)} images. "
423
+ f"Opening mask viewer...[/green]\n"
424
+ "[dim]Controls: \u2190 / \u2192 or P / N to navigate, "
425
+ "Mouse wheel to zoom, Drag to pan, R to reset, T to toggle overlay, "
426
+ "Q / ESC to quit[/dim]"
427
+ )
428
+
429
+ viewer = _MaskViewer(
430
+ image_paths=image_paths,
431
+ dataset=dataset,
432
+ class_colors=class_colors,
433
+ window_name=f"Argus Mask Viewer - {dataset_path.name}",
434
+ opacity=opacity,
435
+ )
436
+ viewer.run()
437
+ console.print("[green]Viewer closed.[/green]")
438
+ return
439
+
298
440
  # Handle classification datasets with grid viewer
299
441
  if dataset.task == TaskType.CLASSIFICATION:
300
442
  # Use first split if specified, otherwise let get_images_by_class handle it
@@ -599,12 +741,16 @@ class _ImageViewer:
599
741
  info_text += " [Annotations: OFF]"
600
742
 
601
743
  cv2.putText(
602
- display, info_text, (10, 30),
603
- cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2
744
+ display,
745
+ info_text,
746
+ (10, 30),
747
+ cv2.FONT_HERSHEY_SIMPLEX,
748
+ 0.7,
749
+ (255, 255, 255),
750
+ 2,
604
751
  )
605
752
  cv2.putText(
606
- display, info_text, (10, 30),
607
- cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 1
753
+ display, info_text, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 1
608
754
  )
609
755
 
610
756
  return display
@@ -733,9 +879,11 @@ class _ClassificationGridViewer:
733
879
  self.current_index = 0
734
880
 
735
881
  # Calculate max images across all classes
736
- self.max_images = max(
737
- len(imgs) for imgs in self.images_by_class.values()
738
- ) if self.images_by_class else 0
882
+ self.max_images = (
883
+ max(len(imgs) for imgs in self.images_by_class.values())
884
+ if self.images_by_class
885
+ else 0
886
+ )
739
887
 
740
888
  # Calculate grid layout
741
889
  self.cols, self.rows = self._calculate_grid_layout()
@@ -883,6 +1031,267 @@ class _ClassificationGridViewer:
883
1031
  cv2.destroyAllWindows()
884
1032
 
885
1033
 
1034
class _MaskViewer:
    """Interactive viewer for semantic mask datasets with colored overlay."""

    def __init__(
        self,
        image_paths: list[Path],
        dataset: MaskDataset,
        class_colors: dict[str, tuple[int, int, int]],
        window_name: str,
        opacity: float = 0.5,
    ):
        self.image_paths = image_paths
        self.dataset = dataset
        self.class_colors = class_colors
        self.window_name = window_name
        self.opacity = opacity

        # Navigation and view state.
        self.current_idx = 0
        self.zoom = 1.0
        self.pan_x = 0.0
        self.pan_y = 0.0

        # Mouse-drag bookkeeping for panning.
        self.dragging = False
        self.drag_start_x = 0
        self.drag_start_y = 0
        self.pan_start_x = 0.0
        self.pan_start_y = 0.0

        # Cached current frame and its blended overlay.
        self.current_img: np.ndarray | None = None
        self.overlay_img: np.ndarray | None = None

        # Overlay visibility (toggled with the T key).
        self.show_overlay = True

        # Resolve class IDs to BGR colors via the class-name color table.
        self._id_to_color: dict[int, tuple[int, int, int]] = {
            class_id: class_colors[name]
            for class_id, name in dataset.get_class_mapping().items()
            if name in class_colors
        }

    def _load_current_image(self) -> bool:
        """Load the current image and build its mask overlay."""
        image_path = self.image_paths[self.current_idx]

        img = cv2.imread(str(image_path))
        if img is None:
            return False

        mask = self.dataset.load_mask(image_path)
        if mask is None:
            # Missing mask: show the plain image instead of failing.
            console.print(f"[yellow]Warning: No mask for {image_path}[/yellow]")
            self.current_img = img
            self.overlay_img = img.copy()
            return True

        # Image and mask must agree pixel-for-pixel to blend.
        if img.shape[:2] != mask.shape[:2]:
            console.print(
                f"[red]Error: Dimension mismatch for {image_path.name}: "
                f"image={img.shape[:2]}, mask={mask.shape[:2]}[/red]"
            )
            return False

        self.current_img = img
        self.overlay_img = self._create_overlay(img, mask)
        return True

    def _create_overlay(self, img: np.ndarray, mask: np.ndarray) -> np.ndarray:
        """Create colored overlay from mask.

        Args:
            img: Original image (BGR).
            mask: Grayscale mask with class IDs.

        Returns:
            Image with colored mask overlay.
        """
        h, w = mask.shape
        colored = np.zeros((h, w, 3), dtype=np.uint8)
        for class_id, color in self._id_to_color.items():
            colored[mask == class_id] = color

        # Per-pixel alpha: configured opacity everywhere except ignore pixels,
        # which stay fully transparent.
        ignore_mask = mask == self.dataset.ignore_index
        alpha = np.full((h, w, 1), self.opacity, dtype=np.float32)
        alpha[ignore_mask] = 0.0

        # result = img * (1 - alpha) + colored * alpha
        blended = (
            img.astype(np.float32) * (1 - alpha)
            + colored.astype(np.float32) * alpha
        )
        return blended.astype(np.uint8)

    def _get_display_image(self) -> np.ndarray:
        """Return the frame for the current zoom/pan, with the info banner."""
        if self.overlay_img is None:
            return np.zeros((480, 640, 3), dtype=np.uint8)

        if self.show_overlay:
            img = self.overlay_img
        elif self.current_img is not None:
            img = self.current_img
        else:
            img = self.overlay_img

        h, w = img.shape[:2]

        if self.zoom == 1.0 and self.pan_x == 0.0 and self.pan_y == 0.0:
            display = img.copy()
        else:
            # Size of the source window that maps onto the full display.
            view_w = int(w / self.zoom)
            view_h = int(h / self.zoom)

            # View center, shifted by the pan offset.
            cx = w / 2 + self.pan_x
            cy = h / 2 + self.pan_y

            x1 = int(max(0, cx - view_w / 2))
            y1 = int(max(0, cy - view_h / 2))
            x2 = int(min(w, x1 + view_w))
            y2 = int(min(h, y1 + view_h))

            # Slide the window back inside the image if it ran off an edge.
            if x2 - x1 < view_w:
                x1 = max(0, x2 - view_w)
            if y2 - y1 < view_h:
                y1 = max(0, y2 - view_h)

            display = cv2.resize(
                img[y1:y2, x1:x2], (w, h), interpolation=cv2.INTER_LINEAR
            )

        # Info banner: index, filename, zoom, overlay state.
        image_path = self.image_paths[self.current_idx]
        info_text = (
            f"[{self.current_idx + 1}/{len(self.image_paths)}] {image_path.name}"
        )
        if self.zoom > 1.0:
            info_text += f" (Zoom: {self.zoom:.1f}x)"
        if not self.show_overlay:
            info_text += " [Overlay: OFF]"

        # Thick white pass plus thin dark pass keeps the text readable on any
        # background.
        cv2.putText(
            display,
            info_text,
            (10, 30),
            cv2.FONT_HERSHEY_SIMPLEX,
            0.7,
            (255, 255, 255),
            2,
        )
        cv2.putText(
            display, info_text, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 1
        )

        return display

    def _mouse_callback(
        self, event: int, x: int, y: int, flags: int, param: None
    ) -> None:
        """Handle mouse events for zoom and pan."""
        if event == cv2.EVENT_MOUSEWHEEL:
            # Wheel up zooms in, wheel down zooms out (clamped to 1x-10x).
            if flags > 0:
                self.zoom = min(10.0, self.zoom * 1.2)
            else:
                self.zoom = max(1.0, self.zoom / 1.2)

            # Back at 1x: recenter.
            if self.zoom == 1.0:
                self.pan_x = 0.0
                self.pan_y = 0.0

        elif event == cv2.EVENT_LBUTTONDOWN:
            self.dragging = True
            self.drag_start_x = x
            self.drag_start_y = y
            self.pan_start_x = self.pan_x
            self.pan_start_y = self.pan_y

        elif event == cv2.EVENT_MOUSEMOVE and self.dragging:
            if self.zoom > 1.0 and self.overlay_img is not None:
                h, w = self.overlay_img.shape[:2]
                # Drag delta is inverted so the image follows the cursor.
                dx = (self.drag_start_x - x) / self.zoom
                dy = (self.drag_start_y - y) / self.zoom

                # Clamp the pan so the view never leaves the image.
                max_pan_x = w * (1 - 1 / self.zoom) / 2
                max_pan_y = h * (1 - 1 / self.zoom) / 2
                self.pan_x = max(-max_pan_x, min(max_pan_x, self.pan_start_x + dx))
                self.pan_y = max(-max_pan_y, min(max_pan_y, self.pan_start_y + dy))

        elif event == cv2.EVENT_LBUTTONUP:
            self.dragging = False

    def _reset_view(self) -> None:
        """Reset zoom and pan to default."""
        self.zoom = 1.0
        self.pan_x = 0.0
        self.pan_y = 0.0

    def _next_image(self) -> None:
        """Advance to the next image (wraps around)."""
        self.current_idx = (self.current_idx + 1) % len(self.image_paths)
        self._reset_view()

    def _prev_image(self) -> None:
        """Step back to the previous image (wraps around)."""
        self.current_idx = (self.current_idx - 1) % len(self.image_paths)
        self._reset_view()

    def run(self) -> None:
        """Run the interactive viewer until the user quits."""
        cv2.namedWindow(self.window_name, cv2.WINDOW_AUTOSIZE)
        cv2.setMouseCallback(self.window_name, self._mouse_callback)

        while True:
            # (Re)load lazily: navigation clears the cached overlay.
            if self.overlay_img is None and not self._load_current_image():
                console.print(
                    f"[yellow]Warning: Could not load "
                    f"{self.image_paths[self.current_idx]}[/yellow]"
                )
                self._next_image()
                continue

            cv2.imshow(self.window_name, self._get_display_image())

            # Short timeout keeps panning responsive between key presses.
            key = cv2.waitKey(30) & 0xFF

            if key == ord("q") or key == 27:  # Q or ESC
                break
            elif key == ord("n") or key == 83 or key == 3:  # N or Right arrow
                self.overlay_img = None
                self._next_image()
            elif key == ord("p") or key == 81 or key == 2:  # P or Left arrow
                self.overlay_img = None
                self._prev_image()
            elif key == ord("r"):  # R to reset zoom
                self._reset_view()
            elif key == ord("t"):  # T to toggle overlay
                self.show_overlay = not self.show_overlay

        cv2.destroyAllWindows()
1293
+
1294
+
886
1295
  def _generate_class_colors(class_names: list[str]) -> dict[str, tuple[int, int, int]]:
887
1296
  """Generate consistent colors for each class name.
888
1297
 
@@ -965,9 +1374,13 @@ def _draw_annotations(
965
1374
  )
966
1375
  # Draw label text
967
1376
  cv2.putText(
968
- img, label,
1377
+ img,
1378
+ label,
969
1379
  (x1 + 2, y1 - baseline - 2),
970
- cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1
1380
+ cv2.FONT_HERSHEY_SIMPLEX,
1381
+ 0.5,
1382
+ (255, 255, 255),
1383
+ 1,
971
1384
  )
972
1385
 
973
1386
  return img
@@ -1026,17 +1439,25 @@ def _discover_datasets(root_path: Path, max_depth: int) -> list[Dataset]:
1026
1439
 
1027
1440
 
1028
1441
def _detect_dataset(path: Path) -> Dataset | None:
    """Try to detect a dataset at the given path.

    Detection priority: YOLO -> COCO -> MaskDataset
    """
    # Probe the most specific formats first: YOLO requires data.yaml, COCO
    # requires an annotations/ folder, and MaskDataset matches on directory
    # layout alone, so it goes last.
    for detector in (YOLODataset, COCODataset, MaskDataset):
        dataset = detector.detect(path)
        if dataset:
            return dataset

    return None
1041
1462
 
1042
1463
 
argus/core/__init__.py CHANGED
@@ -2,6 +2,7 @@
2
2
 
3
3
  from argus.core.base import Dataset
4
4
  from argus.core.coco import COCODataset
5
+ from argus.core.mask import ConfigurationError, MaskDataset
5
6
  from argus.core.split import split_coco_dataset, split_yolo_dataset
6
7
  from argus.core.yolo import YOLODataset
7
8
 
@@ -9,6 +10,8 @@ __all__ = [
9
10
  "Dataset",
10
11
  "YOLODataset",
11
12
  "COCODataset",
13
+ "MaskDataset",
14
+ "ConfigurationError",
12
15
  "split_coco_dataset",
13
16
  "split_yolo_dataset",
14
17
  ]
argus/core/base.py CHANGED
@@ -11,6 +11,7 @@ class DatasetFormat(str, Enum):
11
11
 
12
12
  YOLO = "yolo"
13
13
  COCO = "coco"
14
+ MASK = "mask"
14
15
 
15
16
 
16
17
  class TaskType(str, Enum):
argus/core/coco.py CHANGED
@@ -453,12 +453,14 @@ class COCODataset(Dataset):
453
453
  for i in range(0, len(coords), 2):
454
454
  polygon.append((float(coords[i]), float(coords[i + 1])))
455
455
 
456
- annotations.append({
457
- "class_name": class_name,
458
- "class_id": cat_id,
459
- "bbox": bbox_tuple,
460
- "polygon": polygon,
461
- })
456
+ annotations.append(
457
+ {
458
+ "class_name": class_name,
459
+ "class_id": cat_id,
460
+ "bbox": bbox_tuple,
461
+ "polygon": polygon,
462
+ }
463
+ )
462
464
 
463
465
  except (json.JSONDecodeError, OSError):
464
466
  continue
argus/core/mask.py ADDED
@@ -0,0 +1,648 @@
1
+ """Mask dataset detection and handling for semantic segmentation."""
2
+
3
+ import logging
4
+ from dataclasses import dataclass, field
5
+ from pathlib import Path
6
+
7
+ import cv2
8
+ import numpy as np
9
+ import yaml
10
+
11
+ from argus.core.base import Dataset, DatasetFormat, TaskType
12
+
13
+ logger = logging.getLogger(__name__)
14
+
15
+
16
class ConfigurationError(Exception):
    """Raised when a mask dataset's configuration (e.g. classes.yaml) is invalid."""
20
+
21
+
22
# Paired (images_dirname, masks_dirname) candidates probed during detection.
# Checked in this order; the first structurally valid pair wins.
DIRECTORY_PATTERNS = [
    ("images", "masks"),
    ("img", "gt"),
    ("leftImg8bit", "gtFine"),
]

# Lowercase file suffixes accepted as input images.
IMAGE_EXTENSIONS = {".jpg", ".jpeg", ".png", ".bmp", ".tiff", ".webp"}
31
+
32
+
33
+ @dataclass
34
+ class MaskDataset(Dataset):
35
+ """Dataset format for folder-based semantic segmentation masks.
36
+
37
+ Supports directory structures like:
38
+ - images/ + masks/
39
+ - img/ + gt/
40
+ - leftImg8bit/ + gtFine/ (Cityscapes-style)
41
+
42
+ Each pattern expects parallel split subdirectories:
43
+ dataset/
44
+ ├── images/
45
+ │ ├── train/
46
+ │ └── val/
47
+ ├── masks/
48
+ │ ├── train/
49
+ │ └── val/
50
+ └── classes.yaml # Optional for grayscale, required for RGB
51
+ """
52
+
53
+ images_dir: str = ""
54
+ masks_dir: str = ""
55
+ class_config: dict | None = None
56
+ ignore_index: int | None = 255
57
+ format: DatasetFormat = field(default=DatasetFormat.MASK, init=False)
58
+ task: TaskType = field(default=TaskType.SEGMENTATION, init=False)
59
+
60
+ # Internal: maps class_id -> class_name
61
+ _class_mapping: dict[int, str] = field(default_factory=dict, repr=False)
62
+ # Internal: maps RGB tuple -> (class_id, class_name) for palette masks
63
+ _palette_mapping: dict[tuple[int, int, int], tuple[int, str]] = field(
64
+ default_factory=dict, repr=False
65
+ )
66
+ # Internal: flag for RGB palette mode
67
+ _is_rgb_palette: bool = field(default=False, repr=False)
68
+
69
+ @classmethod
70
+ def detect(cls, path: Path) -> "MaskDataset | None":
71
+ """Detect if the given path contains a mask dataset.
72
+
73
+ Args:
74
+ path: Directory path to check for dataset.
75
+
76
+ Returns:
77
+ MaskDataset instance if detected, None otherwise.
78
+ """
79
+ path = Path(path)
80
+
81
+ if not path.is_dir():
82
+ return None
83
+
84
+ # Try each directory pattern
85
+ for images_name, masks_name in DIRECTORY_PATTERNS:
86
+ images_root = path / images_name
87
+ masks_root = path / masks_name
88
+
89
+ if not (images_root.is_dir() and masks_root.is_dir()):
90
+ continue
91
+
92
+ # Check for split subdirectories with matching structure
93
+ splits = cls._detect_splits(images_root, masks_root)
94
+
95
+ # If no splits found, check if images and masks are directly in root
96
+ if not splits:
97
+ has_images = any(
98
+ f.suffix.lower() in IMAGE_EXTENSIONS for f in images_root.iterdir()
99
+ )
100
+ has_masks = any(
101
+ f.suffix.lower() == ".png" for f in masks_root.iterdir()
102
+ )
103
+
104
+ if has_images and has_masks:
105
+ # Valid unsplit structure
106
+ splits = []
107
+ else:
108
+ continue
109
+
110
+ # Load class configuration if available
111
+ class_config = cls._load_class_config(path)
112
+
113
+ # Determine if masks are grayscale or RGB palette
114
+ is_rgb, palette_mapping = cls._detect_mask_type(path, masks_root, splits)
115
+
116
+ # RGB masks require configuration
117
+ if is_rgb and not class_config:
118
+ raise ConfigurationError(
119
+ f"RGB palette masks detected in {path} but no classes.yaml found. "
120
+ "RGB masks require a classes.yaml config file with palette mapping."
121
+ )
122
+
123
+ # Build class mapping
124
+ class_mapping, ignore_idx = cls._build_class_mapping(
125
+ path, masks_root, splits, class_config, is_rgb, palette_mapping
126
+ )
127
+
128
+ # Extract class names from mapping
129
+ class_names = [
130
+ class_mapping[i]
131
+ for i in sorted(class_mapping.keys())
132
+ if i != ignore_idx
133
+ ]
134
+
135
+ dataset = cls(
136
+ path=path,
137
+ num_classes=len(class_names),
138
+ class_names=class_names,
139
+ splits=splits,
140
+ images_dir=images_name,
141
+ masks_dir=masks_name,
142
+ class_config=class_config,
143
+ ignore_index=ignore_idx,
144
+ )
145
+ dataset._class_mapping = class_mapping
146
+ dataset._palette_mapping = palette_mapping if is_rgb else {}
147
+ dataset._is_rgb_palette = is_rgb
148
+
149
+ return dataset
150
+
151
+ return None
152
+
153
+ @classmethod
154
+ def _detect_splits(cls, images_root: Path, masks_root: Path) -> list[str]:
155
+ """Detect available splits from directory structure.
156
+
157
+ Args:
158
+ images_root: Root directory containing images.
159
+ masks_root: Root directory containing masks.
160
+
161
+ Returns:
162
+ List of split names found in both images and masks directories.
163
+ """
164
+ splits = []
165
+
166
+ for split_name in ["train", "val", "test"]:
167
+ images_split = images_root / split_name
168
+ masks_split = masks_root / split_name
169
+
170
+ if images_split.is_dir() and masks_split.is_dir():
171
+ # Verify there are actual files
172
+ has_images = any(
173
+ f.suffix.lower() in IMAGE_EXTENSIONS for f in images_split.iterdir()
174
+ )
175
+ has_masks = any(
176
+ f.suffix.lower() == ".png" for f in masks_split.iterdir()
177
+ )
178
+
179
+ if has_images and has_masks:
180
+ splits.append(split_name)
181
+
182
+ return splits
183
+
184
+ @classmethod
185
+ def _load_class_config(cls, path: Path) -> dict | None:
186
+ """Load classes.yaml configuration if present.
187
+
188
+ Args:
189
+ path: Dataset root path.
190
+
191
+ Returns:
192
+ Parsed config dict or None if not found.
193
+ """
194
+ config_path = path / "classes.yaml"
195
+ if not config_path.exists():
196
+ config_path = path / "classes.yml"
197
+
198
+ if not config_path.exists():
199
+ return None
200
+
201
+ try:
202
+ with open(config_path, encoding="utf-8") as f:
203
+ return yaml.safe_load(f)
204
+ except (yaml.YAMLError, OSError) as e:
205
+ raise ConfigurationError(f"Failed to parse {config_path}: {e}") from e
206
+
207
+ @classmethod
208
+ def _detect_mask_type(
209
+ cls,
210
+ path: Path,
211
+ masks_root: Path,
212
+ splits: list[str],
213
+ ) -> tuple[bool, dict[tuple[int, int, int], tuple[int, str]]]:
214
+ """Detect if masks are grayscale or RGB palette format.
215
+
216
+ Args:
217
+ path: Dataset root path.
218
+ masks_root: Masks directory root.
219
+ splits: List of split names.
220
+
221
+ Returns:
222
+ Tuple of (is_rgb_palette, palette_mapping).
223
+ """
224
+ # Find sample mask files
225
+ sample_masks: list[Path] = []
226
+
227
+ if splits:
228
+ for split in splits[:1]: # Just check first split
229
+ split_dir = masks_root / split
230
+ sample_masks.extend(list(split_dir.glob("*.png"))[:5])
231
+ else:
232
+ sample_masks.extend(list(masks_root.glob("*.png"))[:5])
233
+
234
+ if not sample_masks:
235
+ return False, {}
236
+
237
+ # Check first mask
238
+ mask = cv2.imread(str(sample_masks[0]), cv2.IMREAD_UNCHANGED)
239
+ if mask is None:
240
+ return False, {}
241
+
242
+ # Check if grayscale (single channel) or RGB (3 channels)
243
+ if len(mask.shape) == 2 or mask.shape[2] == 1:
244
+ return False, {}
245
+ elif mask.shape[2] >= 3:
246
+ return True, {}
247
+
248
+ return False, {}
249
+
250
+ @classmethod
251
+ def _build_class_mapping(
252
+ cls,
253
+ path: Path,
254
+ masks_root: Path,
255
+ splits: list[str],
256
+ class_config: dict | None,
257
+ is_rgb: bool,
258
+ palette_mapping: dict,
259
+ ) -> tuple[dict[int, str], int]:
260
+ """Build class ID to name mapping.
261
+
262
+ Args:
263
+ path: Dataset root path.
264
+ masks_root: Masks directory root.
265
+ splits: List of split names.
266
+ class_config: Parsed classes.yaml or None.
267
+ is_rgb: Whether masks use RGB palette encoding.
268
+ palette_mapping: RGB to class mapping (if RGB).
269
+
270
+ Returns:
271
+ Tuple of (class_id_to_name dict, ignore_index or None).
272
+ """
273
+ ignore_index: int | None = 255
274
+
275
+ if class_config:
276
+ # Get ignore index from config (can be null/None to disable)
277
+ ignore_index = class_config.get("ignore_index", 255)
278
+
279
+ # Try to get names from config
280
+ if "names" in class_config:
281
+ names = class_config["names"]
282
+ if isinstance(names, dict):
283
+ return {int(k): v for k, v in names.items()}, ignore_index
284
+ elif isinstance(names, list):
285
+ return {i: name for i, name in enumerate(names)}, ignore_index
286
+
287
+ # If RGB palette, build from palette config
288
+ if is_rgb and "palette" in class_config:
289
+ mapping = {}
290
+ for entry in class_config["palette"]:
291
+ class_id = entry["id"]
292
+ class_name = entry["name"]
293
+ mapping[class_id] = class_name
294
+ return mapping, ignore_index
295
+
296
+ # Auto-detect classes from grayscale masks
297
+ if not is_rgb:
298
+ return cls._auto_detect_classes(masks_root, splits, ignore_index)
299
+
300
+ # RGB without config - error should have been raised earlier
301
+ return {}, ignore_index
302
+
303
+ @classmethod
304
+ def _auto_detect_classes(
305
+ cls,
306
+ masks_root: Path,
307
+ splits: list[str],
308
+ ignore_index: int | None,
309
+ ) -> tuple[dict[int, str], int | None]:
310
+ """Auto-detect class IDs from grayscale mask values.
311
+
312
+ Args:
313
+ masks_root: Masks directory root.
314
+ splits: List of split names.
315
+ ignore_index: Index to treat as ignored/void, or None.
316
+
317
+ Returns:
318
+ Tuple of (class_id_to_name dict, ignore_index or None).
319
+ """
320
+ unique_values: set[int] = set()
321
+
322
+ # Sample masks to find unique values
323
+ sample_masks: list[Path] = []
324
+
325
+ if splits:
326
+ for split in splits:
327
+ split_dir = masks_root / split
328
+ sample_masks.extend(list(split_dir.glob("*.png"))[:20])
329
+ else:
330
+ sample_masks.extend(list(masks_root.glob("*.png"))[:20])
331
+
332
+ for mask_path in sample_masks[:50]: # Limit sampling
333
+ mask = cv2.imread(str(mask_path), cv2.IMREAD_GRAYSCALE)
334
+ if mask is not None:
335
+ unique_values.update(np.unique(mask).tolist())
336
+
337
+ # Build mapping with auto-generated names
338
+ mapping = {}
339
+ for val in sorted(unique_values):
340
+ if val == ignore_index:
341
+ continue
342
+ mapping[val] = f"class_{val}"
343
+
344
+ return mapping, ignore_index
345
+
346
+ def get_image_paths(self, split: str | None = None) -> list[Path]:
347
+ """Get all image file paths for a split or the entire dataset.
348
+
349
+ Only returns images that have corresponding masks.
350
+
351
+ Args:
352
+ split: Specific split to get images from. If None, returns all images.
353
+
354
+ Returns:
355
+ List of image file paths sorted alphabetically.
356
+ """
357
+ images_root = self.path / self.images_dir
358
+ image_paths: list[Path] = []
359
+
360
+ splits_to_search = (
361
+ [split] if split else (self.splits if self.splits else [None])
362
+ )
363
+
364
+ for s in splits_to_search:
365
+ image_dir = images_root / s if s else images_root
366
+
367
+ if not image_dir.is_dir():
368
+ continue
369
+
370
+ for img_file in image_dir.iterdir():
371
+ if img_file.suffix.lower() not in IMAGE_EXTENSIONS:
372
+ continue
373
+
374
+ # Only include if mask exists
375
+ mask_path = self.get_mask_path(img_file)
376
+ if mask_path and mask_path.exists():
377
+ image_paths.append(img_file)
378
+ else:
379
+ logger.warning(f"No mask found for image: {img_file}")
380
+
381
+ return sorted(image_paths, key=lambda p: p.name)
382
+
383
+ def get_mask_path(self, image_path: Path) -> Path | None:
384
+ """Return corresponding mask path for an image.
385
+
386
+ Tries multiple naming conventions in order:
387
+ 1. Exact stem match: image.jpg -> image.png
388
+ 2. With _mask suffix: image.jpg -> image_mask.png
389
+ 3. With _gt suffix: image.jpg -> image_gt.png
390
+
391
+ Args:
392
+ image_path: Path to the image file.
393
+
394
+ Returns:
395
+ Path to corresponding mask, or None if not found.
396
+ """
397
+ stem = image_path.stem
398
+
399
+ # Determine the split from image path
400
+ image_parts = image_path.parts
401
+ images_dir_idx = None
402
+ for i, part in enumerate(image_parts):
403
+ if part == self.images_dir:
404
+ images_dir_idx = i
405
+ break
406
+
407
+ if images_dir_idx is None:
408
+ # Fallback: look in masks root
409
+ masks_dir = self.path / self.masks_dir
410
+ return self._find_mask_with_patterns(masks_dir, stem)
411
+
412
+ # Build mask directory path with same structure
413
+ mask_parts = list(image_parts[:-1]) # Exclude filename
414
+ mask_parts[images_dir_idx] = self.masks_dir
415
+ masks_dir = Path(*mask_parts)
416
+
417
+ return self._find_mask_with_patterns(masks_dir, stem)
418
+
419
+ def _find_mask_with_patterns(self, masks_dir: Path, stem: str) -> Path | None:
420
+ """Find mask file trying multiple naming patterns.
421
+
422
+ Args:
423
+ masks_dir: Directory containing masks.
424
+ stem: Image filename stem (without extension).
425
+
426
+ Returns:
427
+ Path to mask if found, None otherwise.
428
+ """
429
+ # Try different naming patterns in order of preference
430
+ patterns = [
431
+ f"{stem}.png", # Exact match
432
+ f"{stem}_mask.png", # Common _mask suffix
433
+ f"{stem}_gt.png", # Ground truth suffix
434
+ f"{stem}_label.png", # Label suffix
435
+ ]
436
+
437
+ for pattern in patterns:
438
+ mask_path = masks_dir / pattern
439
+ if mask_path.exists():
440
+ return mask_path
441
+
442
+ return None
443
+
444
+ def get_class_mapping(self) -> dict[int, str]:
445
+ """Return class ID to name mapping.
446
+
447
+ Returns:
448
+ Dictionary mapping class IDs to class names.
449
+ """
450
+ return self._class_mapping.copy()
451
+
452
+ def get_instance_counts(self) -> dict[str, dict[str, int]]:
453
+ """Get pixel counts per class, per split.
454
+
455
+ For mask datasets, this returns the total number of pixels
456
+ for each class across all masks in each split.
457
+
458
+ Returns:
459
+ Dictionary mapping split name to dict of class name to pixel count.
460
+ """
461
+ counts: dict[str, dict[str, int]] = {}
462
+
463
+ splits_to_process = self.splits if self.splits else ["unsplit"]
464
+ masks_root = self.path / self.masks_dir
465
+
466
+ for split in splits_to_process:
467
+ split_counts: dict[str, int] = {name: 0 for name in self.class_names}
468
+
469
+ mask_dir = masks_root if split == "unsplit" else masks_root / split
470
+
471
+ if not mask_dir.is_dir():
472
+ continue
473
+
474
+ for mask_path in mask_dir.glob("*.png"):
475
+ mask = cv2.imread(str(mask_path), cv2.IMREAD_GRAYSCALE)
476
+ if mask is None:
477
+ logger.warning(f"Could not read mask: {mask_path}")
478
+ continue
479
+
480
+ # Count pixels for each class
481
+ unique, pixel_counts = np.unique(mask, return_counts=True)
482
+
483
+ for class_id, count in zip(unique, pixel_counts, strict=True):
484
+ if class_id == self.ignore_index:
485
+ continue
486
+ class_name = self._class_mapping.get(class_id)
487
+ if class_name and class_name in split_counts:
488
+ split_counts[class_name] += int(count)
489
+
490
+ counts[split] = split_counts
491
+
492
+ return counts
493
+
494
+ def get_image_counts(self) -> dict[str, dict[str, int]]:
495
+ """Get image counts per split.
496
+
497
+ Returns:
498
+ Dictionary mapping split name to dict with "total" and "background" counts.
499
+ For mask datasets, "background" is always 0.
500
+ """
501
+ counts: dict[str, dict[str, int]] = {}
502
+
503
+ splits_to_process = self.splits if self.splits else ["unsplit"]
504
+
505
+ for split in splits_to_process:
506
+ image_paths = self.get_image_paths(split if split != "unsplit" else None)
507
+ counts[split] = {"total": len(image_paths), "background": 0}
508
+
509
+ return counts
510
+
511
+ def get_image_class_presence(self, split: str | None = None) -> dict[int, int]:
512
+ """Return count of images containing each class.
513
+
514
+ Args:
515
+ split: Specific split to analyze. If None, analyzes all splits.
516
+
517
+ Returns:
518
+ Dictionary mapping class ID to count of images containing that class.
519
+ """
520
+ presence: dict[int, int] = {class_id: 0 for class_id in self._class_mapping}
521
+
522
+ splits_to_process = (
523
+ [split] if split else (self.splits if self.splits else [None])
524
+ )
525
+ masks_root = self.path / self.masks_dir
526
+
527
+ for s in splits_to_process:
528
+ mask_dir = masks_root / s if s else masks_root
529
+
530
+ if not mask_dir.is_dir():
531
+ continue
532
+
533
+ for mask_path in mask_dir.glob("*.png"):
534
+ mask = cv2.imread(str(mask_path), cv2.IMREAD_GRAYSCALE)
535
+ if mask is None:
536
+ continue
537
+
538
+ # Find which classes are present in this mask
539
+ unique_values = np.unique(mask)
540
+ for class_id in unique_values:
541
+ if class_id != self.ignore_index and class_id in presence:
542
+ presence[class_id] += 1
543
+
544
+ return presence
545
+
546
+ def get_pixel_counts(self, split: str | None = None) -> dict[int, int]:
547
+ """Return total pixel count per class ID.
548
+
549
+ Args:
550
+ split: Specific split to analyze. If None, analyzes all splits.
551
+
552
+ Returns:
553
+ Dictionary mapping class ID to total pixel count.
554
+ """
555
+ pixel_counts: dict[int | None, int] = {
556
+ class_id: 0 for class_id in self._class_mapping
557
+ }
558
+ # Track ignored pixels if ignore_index is set
559
+ if self.ignore_index is not None:
560
+ pixel_counts[self.ignore_index] = 0
561
+
562
+ splits_to_process = (
563
+ [split] if split else (self.splits if self.splits else [None])
564
+ )
565
+ masks_root = self.path / self.masks_dir
566
+
567
+ for s in splits_to_process:
568
+ mask_dir = masks_root / s if s else masks_root
569
+
570
+ if not mask_dir.is_dir():
571
+ continue
572
+
573
+ for mask_path in mask_dir.glob("*.png"):
574
+ mask = cv2.imread(str(mask_path), cv2.IMREAD_GRAYSCALE)
575
+ if mask is None:
576
+ continue
577
+
578
+ unique, counts = np.unique(mask, return_counts=True)
579
+ for class_id, count in zip(unique, counts, strict=True):
580
+ class_id_int = int(class_id)
581
+ if class_id_int in pixel_counts:
582
+ pixel_counts[class_id_int] += int(count)
583
+ elif (
584
+ self.ignore_index is not None
585
+ and class_id_int == self.ignore_index
586
+ ):
587
+ pixel_counts[self.ignore_index] += int(count)
588
+
589
+ return pixel_counts
590
+
591
+ def get_annotations_for_image(self, image_path: Path) -> list[dict]:
592
+ """Get annotations for a specific image.
593
+
594
+ For mask datasets, this returns an empty list since annotations
595
+ are stored as pixel masks rather than discrete objects.
596
+ Use get_mask_path() to get the mask file directly.
597
+
598
+ Args:
599
+ image_path: Path to the image file.
600
+
601
+ Returns:
602
+ Empty list (masks don't have discrete annotations).
603
+ """
604
+ # Mask datasets don't have discrete annotations like detection/segmentation
605
+ # The mask itself IS the annotation
606
+ return []
607
+
608
+ def load_mask(self, image_path: Path) -> np.ndarray | None:
609
+ """Load the mask for a given image.
610
+
611
+ Args:
612
+ image_path: Path to the image file.
613
+
614
+ Returns:
615
+ Mask as numpy array (grayscale), or None if not found.
616
+ """
617
+ mask_path = self.get_mask_path(image_path)
618
+ if mask_path is None or not mask_path.exists():
619
+ return None
620
+
621
+ mask = cv2.imread(str(mask_path), cv2.IMREAD_GRAYSCALE)
622
+ return mask
623
+
624
+ def validate_dimensions(
625
+ self, image_path: Path
626
+ ) -> tuple[bool, tuple[int, int] | None, tuple[int, int] | None]:
627
+ """Check if image and mask dimensions match.
628
+
629
+ Args:
630
+ image_path: Path to the image file.
631
+
632
+ Returns:
633
+ Tuple of (dimensions_match, image_shape, mask_shape).
634
+ """
635
+ mask_path = self.get_mask_path(image_path)
636
+ if mask_path is None or not mask_path.exists():
637
+ return False, None, None
638
+
639
+ img = cv2.imread(str(image_path))
640
+ mask = cv2.imread(str(mask_path), cv2.IMREAD_GRAYSCALE)
641
+
642
+ if img is None or mask is None:
643
+ return False, None, None
644
+
645
+ img_shape = (img.shape[0], img.shape[1])
646
+ mask_shape = (mask.shape[0], mask.shape[1])
647
+
648
+ return img_shape == mask_shape, img_shape, mask_shape
argus/core/yolo.py CHANGED
@@ -89,6 +89,11 @@ class YOLODataset(Dataset):
89
89
  if "names" not in config:
90
90
  continue
91
91
 
92
+ # Skip if this looks like a mask dataset config
93
+ # (has ignore_index or palette keys which are mask-specific)
94
+ if "ignore_index" in config or "palette" in config:
95
+ continue
96
+
92
97
  names = config["names"]
93
98
 
94
99
  # Extract class names
@@ -692,12 +697,14 @@ class YOLODataset(Dataset):
692
697
  x = x_center - width / 2
693
698
  y = y_center - height / 2
694
699
 
695
- annotations.append({
696
- "class_name": class_name,
697
- "class_id": class_id,
698
- "bbox": (x, y, width, height),
699
- "polygon": None,
700
- })
700
+ annotations.append(
701
+ {
702
+ "class_name": class_name,
703
+ "class_id": class_id,
704
+ "bbox": (x, y, width, height),
705
+ "polygon": None,
706
+ }
707
+ )
701
708
  else:
702
709
  # Segmentation: class x1 y1 x2 y2 ... xn yn
703
710
  coords = [float(p) for p in parts[1:]]
@@ -715,12 +722,14 @@ class YOLODataset(Dataset):
715
722
  width = max(xs) - x
716
723
  height = max(ys) - y
717
724
 
718
- annotations.append({
719
- "class_name": class_name,
720
- "class_id": class_id,
721
- "bbox": (x, y, width, height),
722
- "polygon": polygon,
723
- })
725
+ annotations.append(
726
+ {
727
+ "class_name": class_name,
728
+ "class_id": class_id,
729
+ "bbox": (x, y, width, height),
730
+ "polygon": polygon,
731
+ }
732
+ )
724
733
 
725
734
  except (ValueError, IndexError):
726
735
  continue
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: argus-cv
3
- Version: 1.2.0
3
+ Version: 1.3.0
4
4
  Summary: CLI tool for working with vision AI datasets
5
5
  Requires-Python: >=3.10
6
6
  Requires-Dist: numpy>=1.24.0
@@ -0,0 +1,14 @@
1
+ argus/__init__.py,sha256=277ASQvH6ZWVWUzOCVB7vAxn3OYciow4nVkjG16-kio,64
2
+ argus/__main__.py,sha256=63ezHx8eL_lCMoZrCbKhmpao0fmdvYVw1chbknGg-oI,104
3
+ argus/cli.py,sha256=th1Rgn1Sm9juWoavopEXLBT8XEh5lKzOMX-pccwvDgA,47904
4
+ argus/commands/__init__.py,sha256=i2oor9hpVpF-_1qZWCGDLwwi1pZGJfZnUKJZ_NMBG18,30
5
+ argus/core/__init__.py,sha256=II2wYJpGUUGGKOFZ5BCpMIBTfv0WP-F15U_xbpWGjk8,453
6
+ argus/core/base.py,sha256=WBrB7XWz125YZ1UQfHQwsYAuIFY_XGEhG_0ybgPhn6s,3696
7
+ argus/core/coco.py,sha256=atVurZV2T7cszydyD9GfDTWHGYDd-JNK5RD7nse5avc,15823
8
+ argus/core/mask.py,sha256=m7Ztf4lAZx5ITpk3F3mETcvCC6hGydlxK0-2nCjeTfU,21835
9
+ argus/core/split.py,sha256=kEWtbdg6bH-WiNFf83HkqZD90EL4gsavw6JiefuAETs,10776
10
+ argus/core/yolo.py,sha256=tTc9jJzXcwa8LQ_s8nv-D_i2b9k_x-LT1O0eWr4sZ2k,28616
11
+ argus_cv-1.3.0.dist-info/METADATA,sha256=1CCYLSnGHaAdS5jjwUuTJWRHu5OM_oFfvi1L_v3SkQw,1070
12
+ argus_cv-1.3.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
13
+ argus_cv-1.3.0.dist-info/entry_points.txt,sha256=dvJFH7BkrOxJnifSjPhwq1YCafPaqdngWyBuFYE73yY,43
14
+ argus_cv-1.3.0.dist-info/RECORD,,
@@ -1,13 +0,0 @@
1
- argus/__init__.py,sha256=e8XoTWOOBcvGAX8HIUZFycf6Yi3IMFZfXSj4oiK2Cwg,64
2
- argus/__main__.py,sha256=63ezHx8eL_lCMoZrCbKhmpao0fmdvYVw1chbknGg-oI,104
3
- argus/cli.py,sha256=SHAN07n1ffqWil2qhLkc0xIdU7iXhjTPW8nFMDUhnNQ,34103
4
- argus/commands/__init__.py,sha256=i2oor9hpVpF-_1qZWCGDLwwi1pZGJfZnUKJZ_NMBG18,30
5
- argus/core/__init__.py,sha256=Plv_tk0Wq9OlGLDPOSQWxrd5cTwNK9kEZANTim3s23A,348
6
- argus/core/base.py,sha256=Vd_2xR6L3lhu9vHoyLeFTc0Dg59py_D9kaye1tta5Co,3678
7
- argus/core/coco.py,sha256=bJvOhBzwjsOU8DBijGDysnSPlprwetkPf4Z02UOmqw0,15757
8
- argus/core/split.py,sha256=kEWtbdg6bH-WiNFf83HkqZD90EL4gsavw6JiefuAETs,10776
9
- argus/core/yolo.py,sha256=W7WH7RwNYvJu8nYdNcXVNRkK5IBov-1dG23esOkCB1M,28213
10
- argus_cv-1.2.0.dist-info/METADATA,sha256=SP_8kVPy0Mv_9F8CM9zWTARqCOmzYFetyF7QaEgNT-Y,1070
11
- argus_cv-1.2.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
12
- argus_cv-1.2.0.dist-info/entry_points.txt,sha256=dvJFH7BkrOxJnifSjPhwq1YCafPaqdngWyBuFYE73yY,43
13
- argus_cv-1.2.0.dist-info/RECORD,,