bplusplus 0.1.1__py3-none-any.whl → 1.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (95)
  1. bplusplus/__init__.py +5 -3
  2. bplusplus/{collect_images.py → collect.py} +3 -3
  3. bplusplus/prepare.py +573 -0
  4. bplusplus/train_validate.py +8 -64
  5. bplusplus/yolov5detect/__init__.py +1 -0
  6. bplusplus/yolov5detect/detect.py +444 -0
  7. bplusplus/yolov5detect/export.py +1530 -0
  8. bplusplus/yolov5detect/insect.yaml +8 -0
  9. bplusplus/yolov5detect/models/__init__.py +0 -0
  10. bplusplus/yolov5detect/models/common.py +1109 -0
  11. bplusplus/yolov5detect/models/experimental.py +130 -0
  12. bplusplus/yolov5detect/models/hub/anchors.yaml +56 -0
  13. bplusplus/yolov5detect/models/hub/yolov3-spp.yaml +52 -0
  14. bplusplus/yolov5detect/models/hub/yolov3-tiny.yaml +42 -0
  15. bplusplus/yolov5detect/models/hub/yolov3.yaml +52 -0
  16. bplusplus/yolov5detect/models/hub/yolov5-bifpn.yaml +49 -0
  17. bplusplus/yolov5detect/models/hub/yolov5-fpn.yaml +43 -0
  18. bplusplus/yolov5detect/models/hub/yolov5-p2.yaml +55 -0
  19. bplusplus/yolov5detect/models/hub/yolov5-p34.yaml +42 -0
  20. bplusplus/yolov5detect/models/hub/yolov5-p6.yaml +57 -0
  21. bplusplus/yolov5detect/models/hub/yolov5-p7.yaml +68 -0
  22. bplusplus/yolov5detect/models/hub/yolov5-panet.yaml +49 -0
  23. bplusplus/yolov5detect/models/hub/yolov5l6.yaml +61 -0
  24. bplusplus/yolov5detect/models/hub/yolov5m6.yaml +61 -0
  25. bplusplus/yolov5detect/models/hub/yolov5n6.yaml +61 -0
  26. bplusplus/yolov5detect/models/hub/yolov5s-LeakyReLU.yaml +50 -0
  27. bplusplus/yolov5detect/models/hub/yolov5s-ghost.yaml +49 -0
  28. bplusplus/yolov5detect/models/hub/yolov5s-transformer.yaml +49 -0
  29. bplusplus/yolov5detect/models/hub/yolov5s6.yaml +61 -0
  30. bplusplus/yolov5detect/models/hub/yolov5x6.yaml +61 -0
  31. bplusplus/yolov5detect/models/segment/yolov5l-seg.yaml +49 -0
  32. bplusplus/yolov5detect/models/segment/yolov5m-seg.yaml +49 -0
  33. bplusplus/yolov5detect/models/segment/yolov5n-seg.yaml +49 -0
  34. bplusplus/yolov5detect/models/segment/yolov5s-seg.yaml +49 -0
  35. bplusplus/yolov5detect/models/segment/yolov5x-seg.yaml +49 -0
  36. bplusplus/yolov5detect/models/tf.py +797 -0
  37. bplusplus/yolov5detect/models/yolo.py +495 -0
  38. bplusplus/yolov5detect/models/yolov5l.yaml +49 -0
  39. bplusplus/yolov5detect/models/yolov5m.yaml +49 -0
  40. bplusplus/yolov5detect/models/yolov5n.yaml +49 -0
  41. bplusplus/yolov5detect/models/yolov5s.yaml +49 -0
  42. bplusplus/yolov5detect/models/yolov5x.yaml +49 -0
  43. bplusplus/yolov5detect/utils/__init__.py +97 -0
  44. bplusplus/yolov5detect/utils/activations.py +134 -0
  45. bplusplus/yolov5detect/utils/augmentations.py +448 -0
  46. bplusplus/yolov5detect/utils/autoanchor.py +175 -0
  47. bplusplus/yolov5detect/utils/autobatch.py +70 -0
  48. bplusplus/yolov5detect/utils/aws/__init__.py +0 -0
  49. bplusplus/yolov5detect/utils/aws/mime.sh +26 -0
  50. bplusplus/yolov5detect/utils/aws/resume.py +41 -0
  51. bplusplus/yolov5detect/utils/aws/userdata.sh +27 -0
  52. bplusplus/yolov5detect/utils/callbacks.py +72 -0
  53. bplusplus/yolov5detect/utils/dataloaders.py +1385 -0
  54. bplusplus/yolov5detect/utils/docker/Dockerfile +73 -0
  55. bplusplus/yolov5detect/utils/docker/Dockerfile-arm64 +40 -0
  56. bplusplus/yolov5detect/utils/docker/Dockerfile-cpu +42 -0
  57. bplusplus/yolov5detect/utils/downloads.py +136 -0
  58. bplusplus/yolov5detect/utils/flask_rest_api/README.md +70 -0
  59. bplusplus/yolov5detect/utils/flask_rest_api/example_request.py +17 -0
  60. bplusplus/yolov5detect/utils/flask_rest_api/restapi.py +49 -0
  61. bplusplus/yolov5detect/utils/general.py +1294 -0
  62. bplusplus/yolov5detect/utils/google_app_engine/Dockerfile +25 -0
  63. bplusplus/yolov5detect/utils/google_app_engine/additional_requirements.txt +6 -0
  64. bplusplus/yolov5detect/utils/google_app_engine/app.yaml +16 -0
  65. bplusplus/yolov5detect/utils/loggers/__init__.py +476 -0
  66. bplusplus/yolov5detect/utils/loggers/clearml/README.md +222 -0
  67. bplusplus/yolov5detect/utils/loggers/clearml/__init__.py +0 -0
  68. bplusplus/yolov5detect/utils/loggers/clearml/clearml_utils.py +230 -0
  69. bplusplus/yolov5detect/utils/loggers/clearml/hpo.py +90 -0
  70. bplusplus/yolov5detect/utils/loggers/comet/README.md +250 -0
  71. bplusplus/yolov5detect/utils/loggers/comet/__init__.py +551 -0
  72. bplusplus/yolov5detect/utils/loggers/comet/comet_utils.py +151 -0
  73. bplusplus/yolov5detect/utils/loggers/comet/hpo.py +126 -0
  74. bplusplus/yolov5detect/utils/loggers/comet/optimizer_config.json +135 -0
  75. bplusplus/yolov5detect/utils/loggers/wandb/__init__.py +0 -0
  76. bplusplus/yolov5detect/utils/loggers/wandb/wandb_utils.py +210 -0
  77. bplusplus/yolov5detect/utils/loss.py +259 -0
  78. bplusplus/yolov5detect/utils/metrics.py +381 -0
  79. bplusplus/yolov5detect/utils/plots.py +517 -0
  80. bplusplus/yolov5detect/utils/segment/__init__.py +0 -0
  81. bplusplus/yolov5detect/utils/segment/augmentations.py +100 -0
  82. bplusplus/yolov5detect/utils/segment/dataloaders.py +366 -0
  83. bplusplus/yolov5detect/utils/segment/general.py +160 -0
  84. bplusplus/yolov5detect/utils/segment/loss.py +198 -0
  85. bplusplus/yolov5detect/utils/segment/metrics.py +225 -0
  86. bplusplus/yolov5detect/utils/segment/plots.py +152 -0
  87. bplusplus/yolov5detect/utils/torch_utils.py +482 -0
  88. bplusplus/yolov5detect/utils/triton.py +90 -0
  89. bplusplus-1.1.0.dist-info/METADATA +179 -0
  90. bplusplus-1.1.0.dist-info/RECORD +92 -0
  91. bplusplus/build_model.py +0 -38
  92. bplusplus-0.1.1.dist-info/METADATA +0 -97
  93. bplusplus-0.1.1.dist-info/RECORD +0 -8
  94. {bplusplus-0.1.1.dist-info → bplusplus-1.1.0.dist-info}/LICENSE +0 -0
  95. {bplusplus-0.1.1.dist-info → bplusplus-1.1.0.dist-info}/WHEEL +0 -0
bplusplus/yolov5detect/utils/loggers/comet/hpo.py
@@ -0,0 +1,126 @@
+ # Ultralytics YOLOv5 🚀, AGPL-3.0 license
+
+ import argparse
+ import json
+ import logging
+ import os
+ import sys
+ from pathlib import Path
+
+ import comet_ml
+
+ logger = logging.getLogger(__name__)
+
+ FILE = Path(__file__).resolve()
+ ROOT = FILE.parents[3]  # YOLOv5 root directory
+ if str(ROOT) not in sys.path:
+     sys.path.append(str(ROOT))  # add ROOT to PATH
+
+ from train import train
+ from utils.callbacks import Callbacks
+ from utils.general import increment_path
+ from utils.torch_utils import select_device
+
+ # Project Configuration
+ config = comet_ml.config.get_config()
+ COMET_PROJECT_NAME = config.get_string(os.getenv("COMET_PROJECT_NAME"), "comet.project_name", default="yolov5")
+
+
+ def get_args(known=False):
+     """Parses command-line arguments for YOLOv5 training, supporting configuration of weights, data paths,
+     hyperparameters, and more.
+     """
+     parser = argparse.ArgumentParser()
+     parser.add_argument("--weights", type=str, default=ROOT / "yolov5s.pt", help="initial weights path")
+     parser.add_argument("--cfg", type=str, default="", help="model.yaml path")
+     parser.add_argument("--data", type=str, default=ROOT / "data/coco128.yaml", help="dataset.yaml path")
+     parser.add_argument("--hyp", type=str, default=ROOT / "data/hyps/hyp.scratch-low.yaml", help="hyperparameters path")
+     parser.add_argument("--epochs", type=int, default=300, help="total training epochs")
+     parser.add_argument("--batch-size", type=int, default=16, help="total batch size for all GPUs, -1 for autobatch")
+     parser.add_argument("--imgsz", "--img", "--img-size", type=int, default=640, help="train, val image size (pixels)")
+     parser.add_argument("--rect", action="store_true", help="rectangular training")
+     parser.add_argument("--resume", nargs="?", const=True, default=False, help="resume most recent training")
+     parser.add_argument("--nosave", action="store_true", help="only save final checkpoint")
+     parser.add_argument("--noval", action="store_true", help="only validate final epoch")
+     parser.add_argument("--noautoanchor", action="store_true", help="disable AutoAnchor")
+     parser.add_argument("--noplots", action="store_true", help="save no plot files")
+     parser.add_argument("--evolve", type=int, nargs="?", const=300, help="evolve hyperparameters for x generations")
+     parser.add_argument("--bucket", type=str, default="", help="gsutil bucket")
+     parser.add_argument("--cache", type=str, nargs="?", const="ram", help='--cache images in "ram" (default) or "disk"')
+     parser.add_argument("--image-weights", action="store_true", help="use weighted image selection for training")
+     parser.add_argument("--device", default="", help="cuda device, i.e. 0 or 0,1,2,3 or cpu")
+     parser.add_argument("--multi-scale", action="store_true", help="vary img-size +/- 50%%")
+     parser.add_argument("--single-cls", action="store_true", help="train multi-class data as single-class")
+     parser.add_argument("--optimizer", type=str, choices=["SGD", "Adam", "AdamW"], default="SGD", help="optimizer")
+     parser.add_argument("--sync-bn", action="store_true", help="use SyncBatchNorm, only available in DDP mode")
+     parser.add_argument("--workers", type=int, default=8, help="max dataloader workers (per RANK in DDP mode)")
+     parser.add_argument("--project", default=ROOT / "runs/train", help="save to project/name")
+     parser.add_argument("--name", default="exp", help="save to project/name")
+     parser.add_argument("--exist-ok", action="store_true", help="existing project/name ok, do not increment")
+     parser.add_argument("--quad", action="store_true", help="quad dataloader")
+     parser.add_argument("--cos-lr", action="store_true", help="cosine LR scheduler")
+     parser.add_argument("--label-smoothing", type=float, default=0.0, help="Label smoothing epsilon")
+     parser.add_argument("--patience", type=int, default=100, help="EarlyStopping patience (epochs without improvement)")
+     parser.add_argument("--freeze", nargs="+", type=int, default=[0], help="Freeze layers: backbone=10, first3=0 1 2")
+     parser.add_argument("--save-period", type=int, default=-1, help="Save checkpoint every x epochs (disabled if < 1)")
+     parser.add_argument("--seed", type=int, default=0, help="Global training seed")
+     parser.add_argument("--local_rank", type=int, default=-1, help="Automatic DDP Multi-GPU argument, do not modify")
+
+     # Weights & Biases arguments
+     parser.add_argument("--entity", default=None, help="W&B: Entity")
+     parser.add_argument("--upload_dataset", nargs="?", const=True, default=False, help='W&B: Upload data, "val" option')
+     parser.add_argument("--bbox_interval", type=int, default=-1, help="W&B: Set bounding-box image logging interval")
+     parser.add_argument("--artifact_alias", type=str, default="latest", help="W&B: Version of dataset artifact to use")
+
+     # Comet Arguments
+     parser.add_argument("--comet_optimizer_config", type=str, help="Comet: Path to a Comet Optimizer Config File.")
+     parser.add_argument("--comet_optimizer_id", type=str, help="Comet: ID of the Comet Optimizer sweep.")
+     parser.add_argument("--comet_optimizer_objective", type=str, help="Comet: Set to 'minimize' or 'maximize'.")
+     parser.add_argument("--comet_optimizer_metric", type=str, help="Comet: Metric to Optimize.")
+     parser.add_argument(
+         "--comet_optimizer_workers",
+         type=int,
+         default=1,
+         help="Comet: Number of Parallel Workers to use with the Comet Optimizer.",
+     )
+
+     return parser.parse_known_args()[0] if known else parser.parse_args()
+
+
+ def run(parameters, opt):
+     """Executes YOLOv5 training with given hyperparameters and options, setting up device and training directories."""
+     hyp_dict = {k: v for k, v in parameters.items() if k not in ["epochs", "batch_size"]}
+
+     opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve))
+     opt.batch_size = parameters.get("batch_size")
+     opt.epochs = parameters.get("epochs")
+
+     device = select_device(opt.device, batch_size=opt.batch_size)
+     train(hyp_dict, opt, device, callbacks=Callbacks())
+
+
+ if __name__ == "__main__":
+     opt = get_args(known=True)
+
+     opt.weights = str(opt.weights)
+     opt.cfg = str(opt.cfg)
+     opt.data = str(opt.data)
+     opt.project = str(opt.project)
+
+     optimizer_id = os.getenv("COMET_OPTIMIZER_ID")
+     if optimizer_id is None:
+         with open(opt.comet_optimizer_config) as f:
+             optimizer_config = json.load(f)
+         optimizer = comet_ml.Optimizer(optimizer_config)
+     else:
+         optimizer = comet_ml.Optimizer(optimizer_id)
+
+     opt.comet_optimizer_id = optimizer.id
+     status = optimizer.status()
+
+     opt.comet_optimizer_objective = status["spec"]["objective"]
+     opt.comet_optimizer_metric = status["spec"]["metric"]
+
+     logger.info("COMET INFO: Starting Hyperparameter Sweep")
+     for parameter in optimizer.get_parameters():
+         run(parameter["parameters"], opt)
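
For orientation, the sweep loop above builds a comet_ml.Optimizer either from an existing sweep (COMET_OPTIMIZER_ID) or from a JSON config file, then hands each sampled parameter set to run(). A minimal sketch of that flow, assuming COMET_API_KEY is configured for comet_ml and that "optimizer_config.json" is a local copy of the config added later in this diff (the file path and print statements are illustrative, not part of the package):

import json

import comet_ml

with open("optimizer_config.json") as f:  # assumed local path for this sketch
    sweep_config = json.load(f)

optimizer = comet_ml.Optimizer(sweep_config)  # same constructor hpo.py uses
spec = optimizer.status()["spec"]
print(spec["objective"], spec["metric"])  # e.g. "maximize" and "metrics/mAP_0.5"

for trial in optimizer.get_parameters():  # one dict per sampled trial, as in hpo.py
    params = trial["parameters"]  # this is what hpo.py passes to run(parameters, opt)
    print(params.get("lr0"), params.get("batch_size"))
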
bplusplus/yolov5detect/utils/loggers/comet/optimizer_config.json
@@ -0,0 +1,135 @@
+ {
+     "algorithm": "random",
+     "parameters": {
+         "anchor_t": {
+             "type": "discrete",
+             "values": [2, 8]
+         },
+         "batch_size": {
+             "type": "discrete",
+             "values": [16, 32, 64]
+         },
+         "box": {
+             "type": "discrete",
+             "values": [0.02, 0.2]
+         },
+         "cls": {
+             "type": "discrete",
+             "values": [0.2]
+         },
+         "cls_pw": {
+             "type": "discrete",
+             "values": [0.5]
+         },
+         "copy_paste": {
+             "type": "discrete",
+             "values": [1]
+         },
+         "degrees": {
+             "type": "discrete",
+             "values": [0, 45]
+         },
+         "epochs": {
+             "type": "discrete",
+             "values": [5]
+         },
+         "fl_gamma": {
+             "type": "discrete",
+             "values": [0]
+         },
+         "fliplr": {
+             "type": "discrete",
+             "values": [0]
+         },
+         "flipud": {
+             "type": "discrete",
+             "values": [0]
+         },
+         "hsv_h": {
+             "type": "discrete",
+             "values": [0]
+         },
+         "hsv_s": {
+             "type": "discrete",
+             "values": [0]
+         },
+         "hsv_v": {
+             "type": "discrete",
+             "values": [0]
+         },
+         "iou_t": {
+             "type": "discrete",
+             "values": [0.7]
+         },
+         "lr0": {
+             "type": "discrete",
+             "values": [1e-5, 0.1]
+         },
+         "lrf": {
+             "type": "discrete",
+             "values": [0.01, 1]
+         },
+         "mixup": {
+             "type": "discrete",
+             "values": [1]
+         },
+         "momentum": {
+             "type": "discrete",
+             "values": [0.6]
+         },
+         "mosaic": {
+             "type": "discrete",
+             "values": [0]
+         },
+         "obj": {
+             "type": "discrete",
+             "values": [0.2]
+         },
+         "obj_pw": {
+             "type": "discrete",
+             "values": [0.5]
+         },
+         "optimizer": {
+             "type": "categorical",
+             "values": ["SGD", "Adam", "AdamW"]
+         },
+         "perspective": {
+             "type": "discrete",
+             "values": [0]
+         },
+         "scale": {
+             "type": "discrete",
+             "values": [0]
+         },
+         "shear": {
+             "type": "discrete",
+             "values": [0]
+         },
+         "translate": {
+             "type": "discrete",
+             "values": [0]
+         },
+         "warmup_bias_lr": {
+             "type": "discrete",
+             "values": [0, 0.2]
+         },
+         "warmup_epochs": {
+             "type": "discrete",
+             "values": [5]
+         },
+         "warmup_momentum": {
+             "type": "discrete",
+             "values": [0, 0.95]
+         },
+         "weight_decay": {
+             "type": "discrete",
+             "values": [0, 0.001]
+         }
+     },
+     "spec": {
+         "maxCombo": 0,
+         "metric": "metrics/mAP_0.5",
+         "objective": "maximize"
+     },
+     "trials": 1
+ }
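
The config above defines a random-search space: each entry under "parameters" is sampled per trial ("discrete" picks one of the listed values, "categorical" one of the listed strings), while "spec" names the metric being optimized ("metrics/mAP_0.5") and the objective ("maximize"). A sampled trial reaches run() in hpo.py as one flat dict; a minimal sketch of how that dict is split, mirroring the comprehension in run() (the sampled values below are hypothetical picks from the space above):

# Hypothetical trial sampled from the search space in optimizer_config.json.
sampled = {"lr0": 0.1, "lrf": 0.01, "momentum": 0.6, "optimizer": "AdamW", "epochs": 5, "batch_size": 32}

# Mirrors run(parameters, opt): epochs and batch_size become training options,
# everything else is the hyperparameter dict handed to train().
hyp_dict = {k: v for k, v in sampled.items() if k not in ["epochs", "batch_size"]}
epochs, batch_size = sampled["epochs"], sampled["batch_size"]
print(hyp_dict, epochs, batch_size)
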
bplusplus/yolov5detect/utils/loggers/wandb/__init__.py (file without changes)
bplusplus/yolov5detect/utils/loggers/wandb/wandb_utils.py
@@ -0,0 +1,210 @@
+ # Ultralytics YOLOv5 🚀, AGPL-3.0 license
+
+ # WARNING ⚠️ wandb is deprecated and will be removed in future release.
+ # See supported integrations at https://github.com/ultralytics/yolov5#integrations
+
+ import logging
+ import os
+ import sys
+ from contextlib import contextmanager
+ from pathlib import Path
+
+ from utils.general import LOGGER, colorstr
+
+ FILE = Path(__file__).resolve()
+ ROOT = FILE.parents[3]  # YOLOv5 root directory
+ if str(ROOT) not in sys.path:
+     sys.path.append(str(ROOT))  # add ROOT to PATH
+ RANK = int(os.getenv("RANK", -1))
+ DEPRECATION_WARNING = (
+     f"{colorstr('wandb')}: WARNING ⚠️ wandb is deprecated and will be removed in a future release. "
+     f'See supported integrations at https://github.com/ultralytics/yolov5#integrations.'
+ )
+
+ try:
+     import wandb
+
+     assert hasattr(wandb, "__version__")  # verify package import not local dir
+     LOGGER.warning(DEPRECATION_WARNING)
+ except (ImportError, AssertionError):
+     wandb = None
+
+
+ class WandbLogger:
+     """
+     Log training runs, datasets, models, and predictions to Weights & Biases.
+
+     This logger sends information to W&B at wandb.ai. By default, this information includes hyperparameters, system
+     configuration and metrics, model metrics, and basic data metrics and analyses.
+
+     By providing additional command line arguments to train.py, datasets, models and predictions can also be logged.
+
+     For more on how this logger is used, see the Weights & Biases documentation:
+     https://docs.wandb.com/guides/integrations/yolov5
+     """
+
+     def __init__(self, opt, run_id=None, job_type="Training"):
+         """
+         - Initialize WandbLogger instance
+         - Upload dataset if opt.upload_dataset is True
+         - Setup training processes if job_type is 'Training'.
+
+         Arguments:
+         opt (namespace) -- Commandline arguments for this run
+         run_id (str) -- Run ID of W&B run to be resumed
+         job_type (str) -- To set the job_type for this run
+
+         """
+         # Pre-training routine --
+         self.job_type = job_type
+         self.wandb, self.wandb_run = wandb, wandb.run if wandb else None
+         self.val_artifact, self.train_artifact = None, None
+         self.train_artifact_path, self.val_artifact_path = None, None
+         self.result_artifact = None
+         self.val_table, self.result_table = None, None
+         self.max_imgs_to_log = 16
+         self.data_dict = None
+         if self.wandb:
+             self.wandb_run = wandb.run or wandb.init(
+                 config=opt,
+                 resume="allow",
+                 project="YOLOv5" if opt.project == "runs/train" else Path(opt.project).stem,
+                 entity=opt.entity,
+                 name=opt.name if opt.name != "exp" else None,
+                 job_type=job_type,
+                 id=run_id,
+                 allow_val_change=True,
+             )
+
+         if self.wandb_run and self.job_type == "Training":
+             if isinstance(opt.data, dict):
+                 # This means another dataset manager has already processed the dataset info (e.g. ClearML)
+                 # and they will have stored the already processed dict in opt.data
+                 self.data_dict = opt.data
+             self.setup_training(opt)
+
+     def setup_training(self, opt):
+         """
+         Setup the necessary processes for training YOLO models:
+         - Attempt to download model checkpoint and dataset artifacts if opt.resume starts with WANDB_ARTIFACT_PREFIX
+         - Update data_dict, to contain info of previous run if resumed and the paths of dataset artifact if downloaded
+         - Setup log_dict, initialize bbox_interval.
+
+         Arguments:
+         opt (namespace) -- commandline arguments for this run
+
+         """
+         self.log_dict, self.current_epoch = {}, 0
+         self.bbox_interval = opt.bbox_interval
+         if isinstance(opt.resume, str):
+             model_dir, _ = self.download_model_artifact(opt)
+             if model_dir:
+                 self.weights = Path(model_dir) / "last.pt"
+                 config = self.wandb_run.config
+                 opt.weights, opt.save_period, opt.batch_size, opt.bbox_interval, opt.epochs, opt.hyp, opt.imgsz = (
+                     str(self.weights),
+                     config.save_period,
+                     config.batch_size,
+                     config.bbox_interval,
+                     config.epochs,
+                     config.hyp,
+                     config.imgsz,
+                 )
+
+         if opt.bbox_interval == -1:
+             self.bbox_interval = opt.bbox_interval = (opt.epochs // 10) if opt.epochs > 10 else 1
+             if opt.evolve or opt.noplots:
+                 self.bbox_interval = opt.bbox_interval = opt.epochs + 1  # disable bbox_interval
+
+     def log_model(self, path, opt, epoch, fitness_score, best_model=False):
+         """
+         Log the model checkpoint as W&B artifact.
+
+         Arguments:
+         path (Path) -- Path of directory containing the checkpoints
+         opt (namespace) -- Command line arguments for this run
+         epoch (int) -- Current epoch number
+         fitness_score (float) -- fitness score for current epoch
+         best_model (boolean) -- Boolean representing if the current checkpoint is the best yet.
+         """
+         model_artifact = wandb.Artifact(
+             f"run_{wandb.run.id}_model",
+             type="model",
+             metadata={
+                 "original_url": str(path),
+                 "epochs_trained": epoch + 1,
+                 "save period": opt.save_period,
+                 "project": opt.project,
+                 "total_epochs": opt.epochs,
+                 "fitness_score": fitness_score,
+             },
+         )
+         model_artifact.add_file(str(path / "last.pt"), name="last.pt")
+         wandb.log_artifact(
+             model_artifact,
+             aliases=[
+                 "latest",
+                 "last",
+                 f"epoch {str(self.current_epoch)}",
+                 "best" if best_model else "",
+             ],
+         )
+         LOGGER.info(f"Saving model artifact on epoch {epoch + 1}")
+
+     def val_one_image(self, pred, predn, path, names, im):
+         """Evaluates model prediction for a single image, returning metrics and visualizations."""
+         pass
+
+     def log(self, log_dict):
+         """
+         Save the metrics to the logging dictionary.
+
+         Arguments:
+         log_dict (Dict) -- metrics/media to be logged in current step
+         """
+         if self.wandb_run:
+             for key, value in log_dict.items():
+                 self.log_dict[key] = value
+
+     def end_epoch(self):
+         """
+         Commit the log_dict, model artifacts and Tables to W&B and flush the log_dict.
+
+         Arguments:
+         best_result (boolean): Boolean representing if the result of this evaluation is best or not
+         """
+         if self.wandb_run:
+             with all_logging_disabled():
+                 try:
+                     wandb.log(self.log_dict)
+                 except BaseException as e:
+                     LOGGER.info(
+                         f"An error occurred in wandb logger. The training will proceed without interruption. More info\n{e}"
+                     )
+                     self.wandb_run.finish()
+                     self.wandb_run = None
+                 self.log_dict = {}
+
+     def finish_run(self):
+         """Log metrics if any and finish the current W&B run."""
+         if self.wandb_run:
+             if self.log_dict:
+                 with all_logging_disabled():
+                     wandb.log(self.log_dict)
+             wandb.run.finish()
+             LOGGER.warning(DEPRECATION_WARNING)
+
+
+ @contextmanager
+ def all_logging_disabled(highest_level=logging.CRITICAL):
+     """Source - https://gist.github.com/simon-weber/7853144
+     A context manager that will prevent any logging messages triggered during the body from being processed.
+     :param highest_level: the maximum logging level in use.
+     This would only need to be changed if a custom level greater than CRITICAL is defined.
+     """
+     previous_level = logging.root.manager.disable
+     logging.disable(highest_level)
+     try:
+         yield
+     finally:
+         logging.disable(previous_level)
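
As a quick illustration of the helper defined at the end of this file, all_logging_disabled() temporarily raises the root logging threshold so that nothing up to CRITICAL is emitted, then restores the previous level on exit. A minimal usage sketch, assuming the function above is in scope (the logger name and messages are illustrative only):

import logging

logging.basicConfig(level=logging.INFO)
log = logging.getLogger("demo")  # illustrative logger name

log.info("visible before")  # emitted normally
with all_logging_disabled():  # suppresses every record up to CRITICAL
    log.critical("never emitted")  # swallowed inside the context
log.info("visible after")  # previous logging level restored on exit
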