ultralytics 8.2.103__py3-none-any.whl → 8.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of ultralytics has been flagged as potentially problematic.

tests/__init__.py CHANGED
@@ -3,8 +3,8 @@
 from ultralytics.utils import ASSETS, ROOT, WEIGHTS_DIR, checks
 
 # Constants used in tests
-MODEL = WEIGHTS_DIR / "path with spaces" / "yolov8n.pt"  # test spaces in path
-CFG = "yolov8n.yaml"
+MODEL = WEIGHTS_DIR / "path with spaces" / "yolo11n.pt"  # test spaces in path
+CFG = "yolo11n.yaml"
 SOURCE = ASSETS / "bus.jpg"
 SOURCES_LIST = [ASSETS / "bus.jpg", ASSETS, ASSETS / "*", ASSETS / "**/*.jpg"]
 TMP = (ROOT / "../tests/tmp").resolve()  # temp directory for test files
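The bulk of this release swaps the default test weights and configs from the YOLOv8 family ("yolov8n.*") to the new YOLO11 family ("yolo11n.*"). A minimal sketch of the renamed weights in use, assuming yolo11n.pt is auto-downloaded on first use like earlier Ultralytics checkpoints:

```python
from ultralytics import YOLO

# Load the new YOLO11 nano checkpoint; Ultralytics fetches it on first use
model = YOLO("yolo11n.pt")

# Run inference on the same sample image the test suite uses
results = model("https://ultralytics.com/images/bus.jpg")
results[0].show()  # display the detections
```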
tests/conftest.py CHANGED
@@ -74,7 +74,7 @@ def pytest_terminal_summary(terminalreporter, exitstatus, config):
 
     # Remove files
     models = [path for x in ["*.onnx", "*.torchscript"] for path in WEIGHTS_DIR.rglob(x)]
-    for file in ["bus.jpg", "yolov8n.onnx", "yolov8n.torchscript"] + models:
+    for file in ["bus.jpg", "yolo11n.onnx", "yolo11n.torchscript"] + models:
        Path(file).unlink(missing_ok=True)
 
    # Remove directories
tests/test_cuda.py CHANGED
@@ -60,7 +60,7 @@ def test_train():
 @pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
 def test_predict_multiple_devices():
     """Validate model prediction consistency across CPU and CUDA devices."""
-    model = YOLO("yolov8n.pt")
+    model = YOLO("yolo11n.pt")
     model = model.cpu()
     assert str(model.device) == "cpu"
     _ = model(SOURCE)  # CPU inference
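The CUDA test moves one loaded model between devices. A hedged sketch of the same pattern outside pytest, assuming a CUDA-capable GPU and the cpu()/to() device helpers the test itself relies on:

```python
import torch
from ultralytics import YOLO

model = YOLO("yolo11n.pt").cpu()  # force CPU inference
cpu_results = model("https://ultralytics.com/images/bus.jpg")

if torch.cuda.is_available():
    model = model.to("cuda")  # move weights to the GPU
    gpu_results = model("https://ultralytics.com/images/bus.jpg")
    assert str(model.device).startswith("cuda")
```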
tests/test_engine.py CHANGED
@@ -21,13 +21,13 @@ def test_export():
     exporter = Exporter()
     exporter.add_callback("on_export_start", test_func)
     assert test_func in exporter.callbacks["on_export_start"], "callback test failed"
-    f = exporter(model=YOLO("yolov8n.yaml").model)
+    f = exporter(model=YOLO("yolo11n.yaml").model)
     YOLO(f)(ASSETS)  # exported model inference
 
 
 def test_detect():
     """Test YOLO object detection training, validation, and prediction functionality."""
-    overrides = {"data": "coco8.yaml", "model": "yolov8n.yaml", "imgsz": 32, "epochs": 1, "save": False}
+    overrides = {"data": "coco8.yaml", "model": "yolo11n.yaml", "imgsz": 32, "epochs": 1, "save": False}
     cfg = get_cfg(DEFAULT_CFG)
     cfg.data = "coco8.yaml"
     cfg.imgsz = 32
@@ -66,7 +66,7 @@ def test_detect():
 
 def test_segment():
     """Tests image segmentation training, validation, and prediction pipelines using YOLO models."""
-    overrides = {"data": "coco8-seg.yaml", "model": "yolov8n-seg.yaml", "imgsz": 32, "epochs": 1, "save": False}
+    overrides = {"data": "coco8-seg.yaml", "model": "yolo11n-seg.yaml", "imgsz": 32, "epochs": 1, "save": False}
     cfg = get_cfg(DEFAULT_CFG)
     cfg.data = "coco8-seg.yaml"
     cfg.imgsz = 32
@@ -88,7 +88,7 @@ def test_segment():
     pred = segment.SegmentationPredictor(overrides={"imgsz": [64, 64]})
     pred.add_callback("on_predict_start", test_func)
     assert test_func in pred.callbacks["on_predict_start"], "callback test failed"
-    result = pred(source=ASSETS, model=WEIGHTS_DIR / "yolov8n-seg.pt")
+    result = pred(source=ASSETS, model=WEIGHTS_DIR / "yolo11n-seg.pt")
     assert len(result), "predictor test failed"
 
     # Test resume
@@ -105,7 +105,7 @@ def test_segment():
 
 def test_classify():
     """Test image classification including training, validation, and prediction phases."""
-    overrides = {"data": "imagenet10", "model": "yolov8n-cls.yaml", "imgsz": 32, "epochs": 1, "save": False}
+    overrides = {"data": "imagenet10", "model": "yolo11n-cls.yaml", "imgsz": 32, "epochs": 1, "save": False}
     cfg = get_cfg(DEFAULT_CFG)
     cfg.data = "imagenet10"
     cfg.imgsz = 32
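test_export registers a callback on the engine's Exporter; the same hook is reachable from the high-level model API, which forwards its callbacks to the Exporter it creates. A sketch (the print body is illustrative):

```python
from ultralytics import YOLO

def on_export_start(exporter):
    # Runs just before export begins; receives the Exporter instance
    print(f"Starting export with args: {exporter.args}")

model = YOLO("yolo11n.pt")
model.add_callback("on_export_start", on_export_start)
path = model.export(format="onnx", imgsz=32)  # triggers the callback
```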
tests/test_explorer.py CHANGED
@@ -30,7 +30,7 @@ def test_similarity():
 @pytest.mark.skipif(not TORCH_1_13, reason="Explorer requires torch>=1.13")
 def test_det():
     """Test detection functionalities and verify embedding table includes bounding boxes."""
-    exp = Explorer(data="coco8.yaml", model="yolov8n.pt")
+    exp = Explorer(data="coco8.yaml", model="yolo11n.pt")
     exp.create_embeddings_table(force=True)
     assert len(exp.table.head()["bboxes"]) > 0
     similar = exp.get_similar(idx=[1, 2], limit=10)
@@ -44,7 +44,7 @@ def test_det():
 @pytest.mark.skipif(not TORCH_1_13, reason="Explorer requires torch>=1.13")
 def test_seg():
     """Test segmentation functionalities and ensure the embedding table includes segmentation masks."""
-    exp = Explorer(data="coco8-seg.yaml", model="yolov8n-seg.pt")
+    exp = Explorer(data="coco8-seg.yaml", model="yolo11n-seg.pt")
     exp.create_embeddings_table(force=True)
     assert len(exp.table.head()["masks"]) > 0
     similar = exp.get_similar(idx=[1, 2], limit=10)
@@ -57,7 +57,7 @@ def test_seg():
 @pytest.mark.skipif(not TORCH_1_13, reason="Explorer requires torch>=1.13")
 def test_pose():
     """Test pose estimation functionality and verify the embedding table includes keypoints."""
-    exp = Explorer(data="coco8-pose.yaml", model="yolov8n-pose.pt")
+    exp = Explorer(data="coco8-pose.yaml", model="yolo11n-pose.pt")
     exp.create_embeddings_table(force=True)
     assert len(exp.table.head()["keypoints"]) > 0
     similar = exp.get_similar(idx=[1, 2], limit=10)
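The Explorer tests embed a dataset and query it by similarity. A condensed sketch of that workflow, assuming the top-level Explorer export available in the 8.x series:

```python
from ultralytics import Explorer

# Embed a small dataset with the new YOLO11 weights, then query it
exp = Explorer(data="coco8.yaml", model="yolo11n.pt")
exp.create_embeddings_table()
similar = exp.get_similar(idx=[1, 2], limit=10)  # rows most similar to images 1 and 2
print(similar)
```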
tests/test_exports.py CHANGED
@@ -40,7 +40,6 @@ def test_export_openvino():
 
 
 @pytest.mark.slow
-@pytest.mark.skipif(checks.IS_PYTHON_3_12, reason="OpenVINO not supported in Python 3.12")
 @pytest.mark.skipif(not TORCH_1_13, reason="OpenVINO requires torch>=1.13")
 @pytest.mark.parametrize(
     "task, dynamic, int8, half, batch",
@@ -187,7 +186,7 @@ def test_export_pb():
     YOLO(file)(SOURCE, imgsz=32)
 
 
-@pytest.mark.skipif(True, reason="Test disabled as Paddle protobuf and ONNX protobuf requirementsk conflict.")
+@pytest.mark.skipif(True, reason="Test disabled as Paddle protobuf and ONNX protobuf requirements conflict.")
 def test_export_paddle():
     """Test YOLO exports to Paddle format, noting protobuf conflicts with ONNX."""
     YOLO(MODEL).export(format="paddle", imgsz=32)
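Dropping the IS_PYTHON_3_12 skip means OpenVINO exports are now exercised on Python 3.12 as well. A short export sketch using format names from these tests:

```python
from ultralytics import YOLO

model = YOLO("yolo11n.pt")
onnx_path = model.export(format="onnx", imgsz=320)    # single .onnx file
ov_path = model.export(format="openvino", imgsz=320)  # OpenVINO model directory
print(onnx_path, ov_path)
```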
tests/test_integrations.py CHANGED
@@ -17,7 +17,7 @@ from ultralytics.utils.checks import check_requirements
 @pytest.mark.skipif(not check_requirements("ray", install=False), reason="ray[tune] not installed")
 def test_model_ray_tune():
     """Tune YOLO model using Ray for hyperparameter optimization."""
-    YOLO("yolov8n-cls.yaml").tune(
+    YOLO("yolo11n-cls.yaml").tune(
         use_ray=True, data="imagenet10", grace_period=1, iterations=1, imgsz=32, epochs=1, plots=False, device="cpu"
     )
 
@@ -26,7 +26,7 @@ def test_model_ray_tune():
 def test_mlflow():
     """Test training with MLflow tracking enabled (see https://mlflow.org/ for details)."""
     SETTINGS["mlflow"] = True
-    YOLO("yolov8n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=3, plots=False, device="cpu")
+    YOLO("yolo11n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=3, plots=False, device="cpu")
     SETTINGS["mlflow"] = False
 
 
@@ -42,7 +42,7 @@ def test_mlflow_keep_run_active():
 
     # Test with MLFLOW_KEEP_RUN_ACTIVE=True
     os.environ["MLFLOW_KEEP_RUN_ACTIVE"] = "True"
-    YOLO("yolov8n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=1, plots=False, device="cpu")
+    YOLO("yolo11n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=1, plots=False, device="cpu")
     status = mlflow.active_run().info.status
     assert status == "RUNNING", "MLflow run should be active when MLFLOW_KEEP_RUN_ACTIVE=True"
 
@@ -50,13 +50,13 @@ def test_mlflow_keep_run_active():
 
     # Test with MLFLOW_KEEP_RUN_ACTIVE=False
     os.environ["MLFLOW_KEEP_RUN_ACTIVE"] = "False"
-    YOLO("yolov8n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=1, plots=False, device="cpu")
+    YOLO("yolo11n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=1, plots=False, device="cpu")
     status = mlflow.get_run(run_id=run_id).info.status
     assert status == "FINISHED", "MLflow run should be ended when MLFLOW_KEEP_RUN_ACTIVE=False"
 
     # Test with MLFLOW_KEEP_RUN_ACTIVE not set
     os.environ.pop("MLFLOW_KEEP_RUN_ACTIVE", None)
-    YOLO("yolov8n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=1, plots=False, device="cpu")
+    YOLO("yolo11n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=1, plots=False, device="cpu")
     status = mlflow.get_run(run_id=run_id).info.status
     assert status == "FINISHED", "MLflow run should be ended by default when MLFLOW_KEEP_RUN_ACTIVE is not set"
     SETTINGS["mlflow"] = False
@@ -126,23 +126,23 @@ def test_pycocotools():
     from ultralytics.models.yolo.segment import SegmentationValidator
 
     # Download annotations after each dataset downloads first
-    url = "https://github.com/ultralytics/assets/releases/download/v8.2.0/"
+    url = "https://github.com/ultralytics/assets/releases/download/v0.0.0/"
 
-    args = {"model": "yolov8n.pt", "data": "coco8.yaml", "save_json": True, "imgsz": 64}
+    args = {"model": "yolo11n.pt", "data": "coco8.yaml", "save_json": True, "imgsz": 64}
     validator = DetectionValidator(args=args)
     validator()
     validator.is_coco = True
     download(f"{url}instances_val2017.json", dir=DATASETS_DIR / "coco8/annotations")
     _ = validator.eval_json(validator.stats)
 
-    args = {"model": "yolov8n-seg.pt", "data": "coco8-seg.yaml", "save_json": True, "imgsz": 64}
+    args = {"model": "yolo11n-seg.pt", "data": "coco8-seg.yaml", "save_json": True, "imgsz": 64}
     validator = SegmentationValidator(args=args)
     validator()
     validator.is_coco = True
     download(f"{url}instances_val2017.json", dir=DATASETS_DIR / "coco8-seg/annotations")
     _ = validator.eval_json(validator.stats)
 
-    args = {"model": "yolov8n-pose.pt", "data": "coco8-pose.yaml", "save_json": True, "imgsz": 64}
+    args = {"model": "yolo11n-pose.pt", "data": "coco8-pose.yaml", "save_json": True, "imgsz": 64}
     validator = PoseValidator(args=args)
     validator()
     validator.is_coco = True
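The MLflow tests toggle tracking through the Ultralytics settings store. A minimal sketch of the same toggle via the public settings object (dataset and epochs are placeholders):

```python
from ultralytics import YOLO, settings

settings.update({"mlflow": True})   # enable the MLflow logging callback
YOLO("yolo11n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=1, device="cpu")
settings.update({"mlflow": False})  # restore the default
```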
tests/test_python.py CHANGED
@@ -211,7 +211,7 @@ def test_train_scratch():
 
 def test_train_pretrained():
     """Test training of the YOLO model starting from a pre-trained checkpoint."""
-    model = YOLO(WEIGHTS_DIR / "yolov8n-seg.pt")
+    model = YOLO(WEIGHTS_DIR / "yolo11n-seg.pt")
     model.train(data="coco8-seg.yaml", epochs=1, imgsz=32, cache="ram", copy_paste=0.5, mixup=0.5, name=0)
     model(SOURCE)
 
@@ -281,13 +281,13 @@ def test_results(model):
 def test_labels_and_crops():
     """Test output from prediction args for saving YOLO detection labels and crops; ensures accurate saving."""
     imgs = [SOURCE, ASSETS / "zidane.jpg"]
-    results = YOLO(WEIGHTS_DIR / "yolov8n.pt")(imgs, imgsz=160, save_txt=True, save_crop=True)
+    results = YOLO(WEIGHTS_DIR / "yolo11n.pt")(imgs, imgsz=160, save_txt=True, save_crop=True)
     save_path = Path(results[0].save_dir)
     for r in results:
         im_name = Path(r.path).stem
         cls_idxs = r.boxes.cls.int().tolist()
         # Check correct detections
-        assert cls_idxs == ([0, 0, 5, 0, 7] if r.path.endswith("bus.jpg") else [0, 0])  # bus.jpg and zidane.jpg classes
+        assert cls_idxs == ([0, 7, 0, 0] if r.path.endswith("bus.jpg") else [0, 0, 0])  # bus.jpg and zidane.jpg classes
         # Check label path
         labels = save_path / f"labels/{im_name}.txt"
         assert labels.exists()
@@ -339,7 +339,7 @@ def test_data_annotator():
 
     auto_annotate(
         ASSETS,
-        det_model=WEIGHTS_DIR / "yolov8n.pt",
+        det_model=WEIGHTS_DIR / "yolo11n.pt",
         sam_model=WEIGHTS_DIR / "mobile_sam.pt",
         output_dir=TMP / "auto_annotate_labels",
     )
@@ -393,7 +393,7 @@ def test_utils_benchmarks():
     """Benchmark model performance using 'ProfileModels' from 'ultralytics.utils.benchmarks'."""
     from ultralytics.utils.benchmarks import ProfileModels
 
-    ProfileModels(["yolov8n.yaml"], imgsz=32, min_time=1, num_timed_runs=3, num_warmup_runs=1).profile()
+    ProfileModels(["yolo11n.yaml"], imgsz=32, min_time=1, num_timed_runs=3, num_warmup_runs=1).profile()
 
 
 def test_utils_torchutils():
@@ -568,14 +568,14 @@ def test_classify_transforms_train(image, auto_augment, erasing, force_color_jit
 @pytest.mark.skipif(not ONLINE, reason="environment is offline")
 def test_model_tune():
     """Tune YOLO model for performance improvement."""
-    YOLO("yolov8n-pose.pt").tune(data="coco8-pose.yaml", plots=False, imgsz=32, epochs=1, iterations=2, device="cpu")
-    YOLO("yolov8n-cls.pt").tune(data="imagenet10", plots=False, imgsz=32, epochs=1, iterations=2, device="cpu")
+    YOLO("yolo11n-pose.pt").tune(data="coco8-pose.yaml", plots=False, imgsz=32, epochs=1, iterations=2, device="cpu")
+    YOLO("yolo11n-cls.pt").tune(data="imagenet10", plots=False, imgsz=32, epochs=1, iterations=2, device="cpu")
 
 
 def test_model_embeddings():
     """Test YOLO model embeddings."""
     model_detect = YOLO(MODEL)
-    model_segment = YOLO(WEIGHTS_DIR / "yolov8n-seg.pt")
+    model_segment = YOLO(WEIGHTS_DIR / "yolo11n-seg.pt")
 
     for batch in [SOURCE], [SOURCE, SOURCE]:  # test batch size 1 and 2
         assert len(model_detect.embed(source=batch, imgsz=32)) == len(batch)
@@ -585,11 +585,11 @@ def test_model_embeddings():
 @pytest.mark.skipif(checks.IS_PYTHON_3_12, reason="YOLOWorld with CLIP is not supported in Python 3.12")
 def test_yolo_world():
     """Tests YOLO world models with CLIP support, including detection and training scenarios."""
-    model = YOLO("yolov8s-world.pt")  # no YOLOv8n-world model yet
+    model = YOLO("yolov8s-world.pt")  # no YOLO11n-world model yet
     model.set_classes(["tree", "window"])
     model(SOURCE, conf=0.01)
 
-    model = YOLO("yolov8s-worldv2.pt")  # no YOLOv8n-world model yet
+    model = YOLO("yolov8s-worldv2.pt")  # no YOLO11n-world model yet
     # Training from a pretrained model. Eval is included at the final stage of training.
     # Use dota8.yaml which has fewer categories to reduce the inference time of CLIP model
     model.train(
@@ -603,7 +603,7 @@ def test_yolo_world():
     # test WorWorldTrainerFromScratch
     from ultralytics.models.yolo.world.train_world import WorldTrainerFromScratch
 
-    model = YOLO("yolov8s-worldv2.yaml")  # no YOLOv8n-world model yet
+    model = YOLO("yolov8s-worldv2.yaml")  # no YOLO11n-world model yet
     model.train(
         data={"train": {"yolo_data": ["dota8.yaml"]}, "val": {"yolo_data": ["dota8.yaml"]}},
         epochs=1,
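test_model_embeddings exercises embed(), which returns one feature vector per source image. A small sketch:

```python
from ultralytics import YOLO

model = YOLO("yolo11n.pt")
# One embedding tensor per input; a batch of two images here
vectors = model.embed(source=["https://ultralytics.com/images/bus.jpg"] * 2, imgsz=64)
print(len(vectors), vectors[0].shape)
```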
tests/test_solutions.py CHANGED
@@ -14,7 +14,7 @@ WORKOUTS_SOLUTION_DEMO = "https://github.com/ultralytics/assets/releases/downloa
 def test_major_solutions():
     """Test the object counting, heatmap, speed estimation and queue management solution."""
     safe_download(url=MAJOR_SOLUTIONS_DEMO)
-    model = YOLO("yolov8n.pt")
+    model = YOLO("yolo11n.pt")
     names = model.names
     cap = cv2.VideoCapture("solutions_ci_demo.mp4")
     assert cap.isOpened(), "Error reading video file"
@@ -41,7 +41,7 @@ def test_major_solutions():
 def test_aigym():
     """Test the workouts monitoring solution."""
     safe_download(url=WORKOUTS_SOLUTION_DEMO)
-    model = YOLO("yolov8n-pose.pt")
+    model = YOLO("yolo11n-pose.pt")
     cap = cv2.VideoCapture("solution_ci_pose_demo.mp4")
     assert cap.isOpened(), "Error reading video file"
     gym_object = solutions.AIGym(line_thickness=2, pose_type="squat", kpts_to_check=[5, 11, 13])
@@ -60,7 +60,7 @@ def test_instance_segmentation():
     """Test the instance segmentation solution."""
     from ultralytics.utils.plotting import Annotator, colors
 
-    model = YOLO("yolov8n-seg.pt")
+    model = YOLO("yolo11n-seg.pt")
     names = model.names
     cap = cv2.VideoCapture("solutions_ci_demo.mp4")
     assert cap.isOpened(), "Error reading video file"
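All three solutions tests share the same OpenCV read-track loop. A condensed sketch of that loop (the video path is a placeholder; a solutions object such as AIGym would consume the per-frame results, as in test_aigym above):

```python
import cv2
from ultralytics import YOLO

model = YOLO("yolo11n-pose.pt")
cap = cv2.VideoCapture("workout.mp4")  # placeholder video file
assert cap.isOpened(), "Error reading video file"

while cap.isOpened():
    ok, frame = cap.read()
    if not ok:
        break
    # persist=True keeps track IDs stable across frames; a solution object
    # would consume these results to count objects or repetitions
    results = model.track(frame, persist=True, verbose=False)
cap.release()
```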
ultralytics/__init__.py CHANGED
@@ -1,7 +1,6 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
 
-__version__ = "8.2.103"
-
+__version__ = "8.3.0"
 
 import os
 
ultralytics/cfg/datasets/hand-keypoints.yaml CHANGED
@@ -9,8 +9,8 @@
 
 # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
 path: ../datasets/hand-keypoints # dataset root dir
-train: train # train images (relative to 'path') 210 images
-val: val # val images (relative to 'path') 53 images
+train: train # train images (relative to 'path') 18776 images
+val: val # val images (relative to 'path') 7992 images
 
 # Keypoints
 kpt_shape: [21, 3] # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible)
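This hunk corrects the hand-keypoints image counts; the dataset provides 21 keypoints per hand (kpt_shape: [21, 3]). A hedged training sketch against the bundled dataset config (epochs and image size are placeholders):

```python
from ultralytics import YOLO

# Fine-tune a YOLO11 pose model on the 21-keypoint hand dataset;
# "hand-keypoints.yaml" resolves to the bundled config edited above
model = YOLO("yolo11n-pose.pt")
model.train(data="hand-keypoints.yaml", epochs=10, imgsz=640)
```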
ultralytics/cfg/default.yaml CHANGED
@@ -115,6 +115,7 @@ bgr: 0.0 # (float) image channel BGR (probability)
 mosaic: 1.0 # (float) image mosaic (probability)
 mixup: 0.0 # (float) image mixup (probability)
 copy_paste: 0.0 # (float) segment copy-paste (probability)
+copy_paste_mode: "flip" # (str) the method to do copy_paste augmentation (flip, mixup)
 auto_augment: randaugment # (str) auto augmentation policy for classification (randaugment, autoaugment, augmix)
 erasing: 0.4 # (float) probability of random erasing during classification training (0-0.9), 0 means no erasing, must be less than 1.0.
 crop_fraction: 1.0 # (float) image crop fraction for classification (0.1-1), 1.0 means no crop, must be greater than 0.
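8.3.0 adds a copy_paste_mode hyperparameter next to the existing copy_paste probability. Since train() accepts overrides for default.yaml keys, the new mode should be settable per run; a hedged sketch:

```python
from ultralytics import YOLO

model = YOLO("yolo11n-seg.pt")
# copy_paste sets the augmentation probability; copy_paste_mode picks "flip" or "mixup"
model.train(data="coco8-seg.yaml", epochs=1, imgsz=320, copy_paste=0.5, copy_paste_mode="mixup")
```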
ultralytics/cfg/models/11/yolo11-cls.yaml ADDED
@@ -0,0 +1,30 @@
+# Ultralytics YOLO 🚀, AGPL-3.0 license
+# YOLO11-cls image classification model. For Usage examples see https://docs.ultralytics.com/tasks/classify
+
+# Parameters
+nc: 80 # number of classes
+scales: # model compound scaling constants, i.e. 'model=yolo11n-cls.yaml' will call yolo11-cls.yaml with scale 'n'
+  # [depth, width, max_channels]
+  n: [0.50, 0.25, 1024] # summary: 151 layers, 1633584 parameters, 1633584 gradients, 3.3 GFLOPs
+  s: [0.50, 0.50, 1024] # summary: 151 layers, 5545488 parameters, 5545488 gradients, 12.2 GFLOPs
+  m: [0.50, 1.00, 512] # summary: 187 layers, 10455696 parameters, 10455696 gradients, 39.7 GFLOPs
+  l: [1.00, 1.00, 512] # summary: 309 layers, 12937104 parameters, 12937104 gradients, 49.9 GFLOPs
+  x: [1.00, 1.50, 512] # summary: 309 layers, 28458544 parameters, 28458544 gradients, 111.1 GFLOPs
+
+# YOLO11n backbone
+backbone:
+  # [from, repeats, module, args]
+  - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
+  - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
+  - [-1, 2, C3k2, [256, False, 0.25]]
+  - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
+  - [-1, 2, C3k2, [512, False, 0.25]]
+  - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
+  - [-1, 2, C3k2, [512, True]]
+  - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
+  - [-1, 2, C3k2, [1024, True]]
+  - [-1, 2, C2PSA, [1024]] # 9
+
+# YOLO11n head
+head:
+  - [-1, 1, Classify, [nc]] # Classify
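Each of the five new YAMLs can be instantiated directly; the scale suffix in the filename selects a row from its scales table. A short sketch:

```python
from ultralytics import YOLO

# Build an untrained classifier from the new config; the "n" in the
# filename picks the n row (depth 0.50, width 0.25) of the scales table
model = YOLO("yolo11n-cls.yaml")
model.info()  # print the layer and parameter summary
```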
ultralytics/cfg/models/11/yolo11-obb.yaml ADDED
@@ -0,0 +1,47 @@
+# Ultralytics YOLO 🚀, AGPL-3.0 license
+# YOLO11 Oriented Bounding Boxes (OBB) model with P3-P5 outputs. For Usage examples see https://docs.ultralytics.com/tasks/obb
+
+# Parameters
+nc: 80 # number of classes
+scales: # model compound scaling constants, i.e. 'model=yolo11n-obb.yaml' will call yolo11-obb.yaml with scale 'n'
+  # [depth, width, max_channels]
+  n: [0.50, 0.25, 1024] # summary: 344 layers, 2695747 parameters, 2695731 gradients, 6.9 GFLOPs
+  s: [0.50, 0.50, 1024] # summary: 344 layers, 9744931 parameters, 9744915 gradients, 22.7 GFLOPs
+  m: [0.50, 1.00, 512] # summary: 434 layers, 20963523 parameters, 20963507 gradients, 72.2 GFLOPs
+  l: [1.00, 1.00, 512] # summary: 656 layers, 26220995 parameters, 26220979 gradients, 91.3 GFLOPs
+  x: [1.00, 1.50, 512] # summary: 656 layers, 58875331 parameters, 58875315 gradients, 204.3 GFLOPs
+
+# YOLO11n backbone
+backbone:
+  # [from, repeats, module, args]
+  - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
+  - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
+  - [-1, 2, C3k2, [256, False, 0.25]]
+  - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
+  - [-1, 2, C3k2, [512, False, 0.25]]
+  - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
+  - [-1, 2, C3k2, [512, True]]
+  - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
+  - [-1, 2, C3k2, [1024, True]]
+  - [-1, 1, SPPF, [1024, 5]] # 9
+  - [-1, 2, C2PSA, [1024]] # 10
+
+# YOLO11n head
+head:
+  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+  - [[-1, 6], 1, Concat, [1]] # cat backbone P4
+  - [-1, 2, C3k2, [512, False]] # 13
+
+  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+  - [[-1, 4], 1, Concat, [1]] # cat backbone P3
+  - [-1, 2, C3k2, [256, False]] # 16 (P3/8-small)
+
+  - [-1, 1, Conv, [256, 3, 2]]
+  - [[-1, 13], 1, Concat, [1]] # cat head P4
+  - [-1, 2, C3k2, [512, False]] # 19 (P4/16-medium)
+
+  - [-1, 1, Conv, [512, 3, 2]]
+  - [[-1, 10], 1, Concat, [1]] # cat head P5
+  - [-1, 2, C3k2, [1024, True]] # 22 (P5/32-large)
+
+  - [[16, 19, 22], 1, OBB, [nc, 1]] # Detect(P3, P4, P5)
ultralytics/cfg/models/11/yolo11-pose.yaml ADDED
@@ -0,0 +1,48 @@
+# Ultralytics YOLO 🚀, AGPL-3.0 license
+# YOLO11-pose keypoints/pose estimation model. For Usage examples see https://docs.ultralytics.com/tasks/pose
+
+# Parameters
+nc: 80 # number of classes
+kpt_shape: [17, 3] # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible)
+scales: # model compound scaling constants, i.e. 'model=yolo11n-pose.yaml' will call yolo11.yaml with scale 'n'
+  # [depth, width, max_channels]
+  n: [0.50, 0.25, 1024] # summary: 344 layers, 2908507 parameters, 2908491 gradients, 7.7 GFLOPs
+  s: [0.50, 0.50, 1024] # summary: 344 layers, 9948811 parameters, 9948795 gradients, 23.5 GFLOPs
+  m: [0.50, 1.00, 512] # summary: 434 layers, 20973273 parameters, 20973257 gradients, 72.3 GFLOPs
+  l: [1.00, 1.00, 512] # summary: 656 layers, 26230745 parameters, 26230729 gradients, 91.4 GFLOPs
+  x: [1.00, 1.50, 512] # summary: 656 layers, 58889881 parameters, 58889865 gradients, 204.3 GFLOPs
+
+# YOLO11n backbone
+backbone:
+  # [from, repeats, module, args]
+  - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
+  - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
+  - [-1, 2, C3k2, [256, False, 0.25]]
+  - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
+  - [-1, 2, C3k2, [512, False, 0.25]]
+  - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
+  - [-1, 2, C3k2, [512, True]]
+  - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
+  - [-1, 2, C3k2, [1024, True]]
+  - [-1, 1, SPPF, [1024, 5]] # 9
+  - [-1, 2, C2PSA, [1024]] # 10
+
+# YOLO11n head
+head:
+  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+  - [[-1, 6], 1, Concat, [1]] # cat backbone P4
+  - [-1, 2, C3k2, [512, False]] # 13
+
+  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+  - [[-1, 4], 1, Concat, [1]] # cat backbone P3
+  - [-1, 2, C3k2, [256, False]] # 16 (P3/8-small)
+
+  - [-1, 1, Conv, [256, 3, 2]]
+  - [[-1, 13], 1, Concat, [1]] # cat head P4
+  - [-1, 2, C3k2, [512, False]] # 19 (P4/16-medium)
+
+  - [-1, 1, Conv, [512, 3, 2]]
+  - [[-1, 10], 1, Concat, [1]] # cat head P5
+  - [-1, 2, C3k2, [1024, True]] # 22 (P5/32-large)
+
+  - [[16, 19, 22], 1, Pose, [nc, kpt_shape]] # Detect(P3, P4, P5)
ultralytics/cfg/models/11/yolo11-seg.yaml ADDED
@@ -0,0 +1,47 @@
+# Ultralytics YOLO 🚀, AGPL-3.0 license
+# YOLO11-seg instance segmentation model. For Usage examples see https://docs.ultralytics.com/tasks/segment
+
+# Parameters
+nc: 80 # number of classes
+scales: # model compound scaling constants, i.e. 'model=yolo11n-seg.yaml' will call yolo11-seg.yaml with scale 'n'
+  # [depth, width, max_channels]
+  n: [0.50, 0.25, 1024] # summary: 355 layers, 2876848 parameters, 2876832 gradients, 10.5 GFLOPs
+  s: [0.50, 0.50, 1024] # summary: 355 layers, 10113248 parameters, 10113232 gradients, 35.8 GFLOPs
+  m: [0.50, 1.00, 512] # summary: 445 layers, 22420896 parameters, 22420880 gradients, 123.9 GFLOPs
+  l: [1.00, 1.00, 512] # summary: 667 layers, 27678368 parameters, 27678352 gradients, 143.0 GFLOPs
+  x: [1.00, 1.50, 512] # summary: 667 layers, 62142656 parameters, 62142640 gradients, 320.2 GFLOPs
+
+# YOLO11n backbone
+backbone:
+  # [from, repeats, module, args]
+  - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
+  - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
+  - [-1, 2, C3k2, [256, False, 0.25]]
+  - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
+  - [-1, 2, C3k2, [512, False, 0.25]]
+  - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
+  - [-1, 2, C3k2, [512, True]]
+  - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
+  - [-1, 2, C3k2, [1024, True]]
+  - [-1, 1, SPPF, [1024, 5]] # 9
+  - [-1, 2, C2PSA, [1024]] # 10
+
+# YOLO11n head
+head:
+  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+  - [[-1, 6], 1, Concat, [1]] # cat backbone P4
+  - [-1, 2, C3k2, [512, False]] # 13
+
+  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+  - [[-1, 4], 1, Concat, [1]] # cat backbone P3
+  - [-1, 2, C3k2, [256, False]] # 16 (P3/8-small)
+
+  - [-1, 1, Conv, [256, 3, 2]]
+  - [[-1, 13], 1, Concat, [1]] # cat head P4
+  - [-1, 2, C3k2, [512, False]] # 19 (P4/16-medium)
+
+  - [-1, 1, Conv, [512, 3, 2]]
+  - [[-1, 10], 1, Concat, [1]] # cat head P5
+  - [-1, 2, C3k2, [1024, True]] # 22 (P5/32-large)
+
+  - [[16, 19, 22], 1, Segment, [nc, 32, 256]] # Detect(P3, P4, P5)
ultralytics/cfg/models/11/yolo11.yaml ADDED
@@ -0,0 +1,47 @@
+# Ultralytics YOLO 🚀, AGPL-3.0 license
+# YOLO11 object detection model with P3-P5 outputs. For Usage examples see https://docs.ultralytics.com/tasks/detect
+
+# Parameters
+nc: 80 # number of classes
+scales: # model compound scaling constants, i.e. 'model=yolo11n.yaml' will call yolo11.yaml with scale 'n'
+  # [depth, width, max_channels]
+  n: [0.50, 0.25, 1024] # summary: 319 layers, 2624080 parameters, 2624064 gradients, 6.6 GFLOPs
+  s: [0.50, 0.50, 1024] # summary: 319 layers, 9458752 parameters, 9458736 gradients, 21.7 GFLOPs
+  m: [0.50, 1.00, 512] # summary: 409 layers, 20114688 parameters, 20114672 gradients, 68.5 GFLOPs
+  l: [1.00, 1.00, 512] # summary: 631 layers, 25372160 parameters, 25372144 gradients, 87.6 GFLOPs
+  x: [1.00, 1.50, 512] # summary: 631 layers, 56966176 parameters, 56966160 gradients, 196.0 GFLOPs
+
+# YOLO11n backbone
+backbone:
+  # [from, repeats, module, args]
+  - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
+  - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
+  - [-1, 2, C3k2, [256, False, 0.25]]
+  - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
+  - [-1, 2, C3k2, [512, False, 0.25]]
+  - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
+  - [-1, 2, C3k2, [512, True]]
+  - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
+  - [-1, 2, C3k2, [1024, True]]
+  - [-1, 1, SPPF, [1024, 5]] # 9
+  - [-1, 2, C2PSA, [1024]] # 10
+
+# YOLO11n head
+head:
+  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+  - [[-1, 6], 1, Concat, [1]] # cat backbone P4
+  - [-1, 2, C3k2, [512, False]] # 13
+
+  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+  - [[-1, 4], 1, Concat, [1]] # cat backbone P3
+  - [-1, 2, C3k2, [256, False]] # 16 (P3/8-small)
+
+  - [-1, 1, Conv, [256, 3, 2]]
+  - [[-1, 13], 1, Concat, [1]] # cat head P4
+  - [-1, 2, C3k2, [512, False]] # 19 (P4/16-medium)
+
+  - [-1, 1, Conv, [512, 3, 2]]
+  - [[-1, 10], 1, Concat, [1]] # cat head P5
+  - [-1, 2, C3k2, [1024, True]] # 22 (P5/32-large)
+
+  - [[16, 19, 22], 1, Detect, [nc]] # Detect(P3, P4, P5)
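As the scales comment notes, one base definition serves all five detection model sizes. A sketch of scale selection by filename:

```python
from ultralytics import YOLO

# Same yolo11.yaml architecture, different compound-scaling rows
nano = YOLO("yolo11n.yaml")   # depth 0.50, width 0.25, ~2.6M parameters
small = YOLO("yolo11s.yaml")  # depth 0.50, width 0.50, ~9.5M parameters
nano.info()
small.info()
```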