dgenerate-ultralytics-headless 8.3.196__py3-none-any.whl → 8.3.198__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. {dgenerate_ultralytics_headless-8.3.196.dist-info → dgenerate_ultralytics_headless-8.3.198.dist-info}/METADATA +1 -1
  2. {dgenerate_ultralytics_headless-8.3.196.dist-info → dgenerate_ultralytics_headless-8.3.198.dist-info}/RECORD +46 -45
  3. tests/test_engine.py +9 -1
  4. ultralytics/__init__.py +1 -1
  5. ultralytics/cfg/__init__.py +0 -1
  6. ultralytics/cfg/datasets/construction-ppe.yaml +32 -0
  7. ultralytics/cfg/default.yaml +96 -94
  8. ultralytics/cfg/trackers/botsort.yaml +16 -17
  9. ultralytics/cfg/trackers/bytetrack.yaml +9 -11
  10. ultralytics/data/augment.py +1 -1
  11. ultralytics/data/dataset.py +1 -1
  12. ultralytics/engine/exporter.py +36 -35
  13. ultralytics/engine/model.py +1 -2
  14. ultralytics/engine/predictor.py +1 -2
  15. ultralytics/engine/results.py +1 -1
  16. ultralytics/engine/trainer.py +8 -10
  17. ultralytics/engine/tuner.py +54 -32
  18. ultralytics/models/sam/modules/decoders.py +3 -3
  19. ultralytics/models/sam/modules/sam.py +5 -5
  20. ultralytics/models/sam/predict.py +11 -11
  21. ultralytics/models/yolo/classify/train.py +2 -7
  22. ultralytics/models/yolo/classify/val.py +2 -2
  23. ultralytics/models/yolo/detect/predict.py +1 -1
  24. ultralytics/models/yolo/detect/train.py +1 -11
  25. ultralytics/models/yolo/detect/val.py +4 -4
  26. ultralytics/models/yolo/obb/val.py +3 -3
  27. ultralytics/models/yolo/pose/predict.py +1 -1
  28. ultralytics/models/yolo/pose/train.py +0 -7
  29. ultralytics/models/yolo/pose/val.py +2 -2
  30. ultralytics/models/yolo/segment/predict.py +2 -2
  31. ultralytics/models/yolo/segment/train.py +0 -6
  32. ultralytics/models/yolo/segment/val.py +13 -11
  33. ultralytics/models/yolo/yoloe/val.py +1 -1
  34. ultralytics/nn/modules/block.py +1 -1
  35. ultralytics/nn/modules/head.py +1 -2
  36. ultralytics/nn/tasks.py +2 -2
  37. ultralytics/utils/checks.py +1 -1
  38. ultralytics/utils/loss.py +1 -2
  39. ultralytics/utils/metrics.py +6 -6
  40. ultralytics/utils/nms.py +8 -14
  41. ultralytics/utils/plotting.py +22 -36
  42. ultralytics/utils/torch_utils.py +9 -27
  43. {dgenerate_ultralytics_headless-8.3.196.dist-info → dgenerate_ultralytics_headless-8.3.198.dist-info}/WHEEL +0 -0
  44. {dgenerate_ultralytics_headless-8.3.196.dist-info → dgenerate_ultralytics_headless-8.3.198.dist-info}/entry_points.txt +0 -0
  45. {dgenerate_ultralytics_headless-8.3.196.dist-info → dgenerate_ultralytics_headless-8.3.198.dist-info}/licenses/LICENSE +0 -0
  46. {dgenerate_ultralytics_headless-8.3.196.dist-info → dgenerate_ultralytics_headless-8.3.198.dist-info}/top_level.txt +0 -0
ultralytics/utils/plotting.py
@@ -812,14 +812,13 @@ def plot_images(
 
         # Plot masks
         if len(masks):
-            if idx.shape[0] == masks.shape[0]:  # overlap_mask=False
+            if idx.shape[0] == masks.shape[0] and masks.max() <= 1:  # overlap_mask=False
                 image_masks = masks[idx]
             else:  # overlap_mask=True
                 image_masks = masks[[i]]  # (1, 640, 640)
                 nl = idx.sum()
-                index = np.arange(nl).reshape((nl, 1, 1)) + 1
-                image_masks = np.repeat(image_masks, nl, axis=0)
-                image_masks = np.where(image_masks == index, 1.0, 0.0)
+                index = np.arange(1, nl + 1).reshape((nl, 1, 1))
+                image_masks = (image_masks == index).astype(np.float32)
 
             im = np.asarray(annotator.im).copy()
             for j in range(len(image_masks)):
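A minimal standalone sketch of the new decoding path (toy 3x3 array and instance count, not Ultralytics data): with overlap_mask=True a single mask stores instance k as pixel value k, so comparing it against a (nl, 1, 1) index column broadcasts directly to one binary plane per instance, replacing the previous np.repeat/np.where pair. The added masks.max() <= 1 check keeps genuinely binary masks on the overlap_mask=False branch even when the instance count happens to match.

import numpy as np

masks = np.array([[[0, 1, 1], [2, 2, 0], [0, 3, 3]]])  # hypothetical (1, 3, 3) overlap mask with 3 instances
nl = 3
index = np.arange(1, nl + 1).reshape((nl, 1, 1))  # values 1..3, shape (3, 1, 1)
image_masks = (masks == index).astype(np.float32)  # broadcasts to (3, 3, 3), one binary plane per instance
print(image_masks[1])  # plane for the instance labelled 2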
@@ -847,14 +846,7 @@ def plot_images(
 
 
 @plt_settings()
-def plot_results(
-    file: str = "path/to/results.csv",
-    dir: str = "",
-    segment: bool = False,
-    pose: bool = False,
-    classify: bool = False,
-    on_plot: Callable | None = None,
-):
+def plot_results(file: str = "path/to/results.csv", dir: str = "", on_plot: Callable | None = None):
     """
     Plot training results from a results CSV file. The function supports various types of data including segmentation,
     pose estimation, and classification. Plots are saved as 'results.png' in the directory where the CSV is located.
@@ -862,9 +854,6 @@ def plot_results(
     Args:
         file (str, optional): Path to the CSV file containing the training results.
         dir (str, optional): Directory where the CSV file is located if 'file' is not provided.
-        segment (bool, optional): Flag to indicate if the data is for segmentation.
-        pose (bool, optional): Flag to indicate if the data is for pose estimation.
-        classify (bool, optional): Flag to indicate if the data is for classification.
        on_plot (callable, optional): Callback function to be executed after plotting. Takes filename as an argument.
 
     Examples:
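A hedged usage sketch of the simplified signature (the path is illustrative): the task-specific flags are no longer needed because the plotted columns are now discovered from the CSV header, as the next hunk shows.

from ultralytics.utils.plotting import plot_results

plot_results(file="runs/segment/train/results.csv")  # illustrative path; saves results.png next to the CSV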
@@ -876,34 +865,31 @@ def plot_results(
     from scipy.ndimage import gaussian_filter1d
 
     save_dir = Path(file).parent if file else Path(dir)
-    if classify:
-        fig, ax = plt.subplots(2, 2, figsize=(6, 6), tight_layout=True)
-        index = [2, 5, 3, 4]
-    elif segment:
-        fig, ax = plt.subplots(2, 8, figsize=(18, 6), tight_layout=True)
-        index = [2, 3, 4, 5, 6, 7, 10, 11, 14, 15, 16, 17, 8, 9, 12, 13]
-    elif pose:
-        fig, ax = plt.subplots(2, 9, figsize=(21, 6), tight_layout=True)
-        index = [2, 3, 4, 5, 6, 7, 8, 11, 12, 15, 16, 17, 18, 19, 9, 10, 13, 14]
-    else:
-        fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True)
-        index = [2, 3, 4, 5, 6, 9, 10, 11, 7, 8]
-    ax = ax.ravel()
     files = list(save_dir.glob("results*.csv"))
     assert len(files), f"No results.csv files found in {save_dir.resolve()}, nothing to plot."
-    for f in files:
+
+    loss_keys, metric_keys = [], []
+    for i, f in enumerate(files):
         try:
             data = pl.read_csv(f, infer_schema_length=None)
-            s = [x.strip() for x in data.columns]
+            if i == 0:
+                for c in data.columns:
+                    if "loss" in c:
+                        loss_keys.append(c)
+                    elif "metric" in c:
+                        metric_keys.append(c)
+                loss_mid, metric_mid = len(loss_keys) // 2, len(metric_keys) // 2
+                columns = (
+                    loss_keys[:loss_mid] + metric_keys[:metric_mid] + loss_keys[loss_mid:] + metric_keys[metric_mid:]
+                )
+                fig, ax = plt.subplots(2, len(columns) // 2, figsize=(len(columns) + 2, 6), tight_layout=True)
+                ax = ax.ravel()
             x = data.select(data.columns[0]).to_numpy().flatten()
-            for i, j in enumerate(index):
-                y = data.select(data.columns[j]).to_numpy().flatten().astype("float")
-                # y[y == 0] = np.nan  # don't show zero values
+            for i, j in enumerate(columns):
+                y = data.select(j).to_numpy().flatten().astype("float")
                 ax[i].plot(x, y, marker=".", label=f.stem, linewidth=2, markersize=8)  # actual results
                 ax[i].plot(x, gaussian_filter1d(y, sigma=3), ":", label="smooth", linewidth=2)  # smoothing line
-                ax[i].set_title(s[j], fontsize=12)
-                # if j in {8, 9, 10}:  # share train and val loss y axes
-                #     ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])
+                ax[i].set_title(j, fontsize=12)
         except Exception as e:
             LOGGER.error(f"Plotting error for {f}: {e}")
     ax[1].legend()
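A standalone sketch of the new column-ordering rule using made-up header names (real results.csv headers may differ): loss and metric columns are each split in half so the two subplot rows show train losses plus the first half of the metrics, then val losses plus the remaining metrics.

header = [
    "epoch",
    "train/box_loss", "train/seg_loss",
    "metrics/precision(B)", "metrics/recall(B)",
    "val/box_loss", "val/seg_loss",
    "metrics/mAP50(B)", "metrics/mAP50-95(B)",
]
loss_keys = [c for c in header if "loss" in c]      # preserves CSV order: train losses, then val losses
metric_keys = [c for c in header if "metric" in c]
loss_mid, metric_mid = len(loss_keys) // 2, len(metric_keys) // 2
columns = loss_keys[:loss_mid] + metric_keys[:metric_mid] + loss_keys[loss_mid:] + metric_keys[metric_mid:]
print(columns)
# ['train/box_loss', 'train/seg_loss', 'metrics/precision(B)', 'metrics/recall(B)',
#  'val/box_loss', 'val/seg_loss', 'metrics/mAP50(B)', 'metrics/mAP50-95(B)']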
ultralytics/utils/torch_utils.py
@@ -1006,35 +1006,13 @@ class FXModel(nn.Module):
         return x
 
 
-def disable_dynamo(func: Any) -> Any:
-    """
-    Disable torch.compile/dynamo for a callable when available.
-
-    Args:
-        func (Any): Callable object to wrap. Could be a function, method, or class.
-
-    Returns:
-        func (Any): Same callable, wrapped by torch._dynamo.disable when available, otherwise unchanged.
-
-    Examples:
-        >>> @disable_dynamo
-        ... def fn(x):
-        ...     return x + 1
-        >>> # Works even if torch._dynamo is not available
-        >>> _ = fn(1)
-    """
-    if hasattr(torch, "_dynamo"):
-        return torch._dynamo.disable(func)
-    return func
-
-
 def attempt_compile(
     model: torch.nn.Module,
     device: torch.device,
     imgsz: int = 640,
     use_autocast: bool = False,
     warmup: bool = False,
-    prefix: str = colorstr("compile:"),
+    mode: bool | str = "default",
 ) -> torch.nn.Module:
     """
     Compile a model with torch.compile and optionally warm up the graph to reduce first-iteration latency.
@@ -1049,7 +1027,8 @@ def attempt_compile(
         imgsz (int, optional): Square input size to create a dummy tensor with shape (1, 3, imgsz, imgsz) for warmup.
         use_autocast (bool, optional): Whether to run warmup under autocast on CUDA or MPS devices.
         warmup (bool, optional): Whether to execute a single dummy forward pass to warm up the compiled model.
-        prefix (str, optional): Message prefix for logger output.
+        mode (bool | str, optional): torch.compile mode. True → "default", False → no compile, or a string like
+            "default", "reduce-overhead", "max-autotune".
 
     Returns:
         model (torch.nn.Module): Compiled model if compilation succeeds, otherwise the original unmodified model.
@@ -1064,13 +1043,16 @@ def attempt_compile(
         >>> # Try to compile and warm up a model with a 640x640 input
         >>> model = attempt_compile(model, device=device, imgsz=640, use_autocast=True, warmup=True)
         """
-    if not hasattr(torch, "compile"):
+    if not hasattr(torch, "compile") or not mode:
         return model
 
-    LOGGER.info(f"{prefix} starting torch.compile...")
+    if mode is True:
+        mode = "default"
+    prefix = colorstr("compile:")
+    LOGGER.info(f"{prefix} starting torch.compile with '{mode}' mode...")
     t0 = time.perf_counter()
     try:
-        model = torch.compile(model, mode="max-autotune", backend="inductor")
+        model = torch.compile(model, mode=mode, backend="inductor")
     except Exception as e:
         LOGGER.warning(f"{prefix} torch.compile failed, continuing uncompiled: {e}")
         return model
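A hedged usage sketch of the reworked signature (the Conv2d stand-in and device selection are placeholders): mode=False skips compilation and returns the model untouched, mode=True maps to "default", and any string is forwarded to torch.compile.

import torch
from ultralytics.utils.torch_utils import attempt_compile

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = torch.nn.Conv2d(3, 16, 3).to(device)  # stand-in module for illustration
model = attempt_compile(model, device=device, imgsz=640, warmup=False, mode="reduce-overhead")
model = attempt_compile(model, device=device, mode=False)  # no-op: returns the model unchanged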