bplusplus 1.1.0__py3-none-any.whl → 1.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of bplusplus might be problematic; see the registry's advisory page for more details.

Files changed (97):
  1. bplusplus/__init__.py +4 -2
  2. bplusplus/collect.py +69 -5
  3. bplusplus/hierarchical/test.py +670 -0
  4. bplusplus/hierarchical/train.py +676 -0
  5. bplusplus/prepare.py +228 -64
  6. bplusplus/resnet/test.py +473 -0
  7. bplusplus/resnet/train.py +329 -0
  8. bplusplus-1.2.0.dist-info/METADATA +249 -0
  9. bplusplus-1.2.0.dist-info/RECORD +12 -0
  10. bplusplus/yolov5detect/__init__.py +0 -1
  11. bplusplus/yolov5detect/detect.py +0 -444
  12. bplusplus/yolov5detect/export.py +0 -1530
  13. bplusplus/yolov5detect/insect.yaml +0 -8
  14. bplusplus/yolov5detect/models/__init__.py +0 -0
  15. bplusplus/yolov5detect/models/common.py +0 -1109
  16. bplusplus/yolov5detect/models/experimental.py +0 -130
  17. bplusplus/yolov5detect/models/hub/anchors.yaml +0 -56
  18. bplusplus/yolov5detect/models/hub/yolov3-spp.yaml +0 -52
  19. bplusplus/yolov5detect/models/hub/yolov3-tiny.yaml +0 -42
  20. bplusplus/yolov5detect/models/hub/yolov3.yaml +0 -52
  21. bplusplus/yolov5detect/models/hub/yolov5-bifpn.yaml +0 -49
  22. bplusplus/yolov5detect/models/hub/yolov5-fpn.yaml +0 -43
  23. bplusplus/yolov5detect/models/hub/yolov5-p2.yaml +0 -55
  24. bplusplus/yolov5detect/models/hub/yolov5-p34.yaml +0 -42
  25. bplusplus/yolov5detect/models/hub/yolov5-p6.yaml +0 -57
  26. bplusplus/yolov5detect/models/hub/yolov5-p7.yaml +0 -68
  27. bplusplus/yolov5detect/models/hub/yolov5-panet.yaml +0 -49
  28. bplusplus/yolov5detect/models/hub/yolov5l6.yaml +0 -61
  29. bplusplus/yolov5detect/models/hub/yolov5m6.yaml +0 -61
  30. bplusplus/yolov5detect/models/hub/yolov5n6.yaml +0 -61
  31. bplusplus/yolov5detect/models/hub/yolov5s-LeakyReLU.yaml +0 -50
  32. bplusplus/yolov5detect/models/hub/yolov5s-ghost.yaml +0 -49
  33. bplusplus/yolov5detect/models/hub/yolov5s-transformer.yaml +0 -49
  34. bplusplus/yolov5detect/models/hub/yolov5s6.yaml +0 -61
  35. bplusplus/yolov5detect/models/hub/yolov5x6.yaml +0 -61
  36. bplusplus/yolov5detect/models/segment/yolov5l-seg.yaml +0 -49
  37. bplusplus/yolov5detect/models/segment/yolov5m-seg.yaml +0 -49
  38. bplusplus/yolov5detect/models/segment/yolov5n-seg.yaml +0 -49
  39. bplusplus/yolov5detect/models/segment/yolov5s-seg.yaml +0 -49
  40. bplusplus/yolov5detect/models/segment/yolov5x-seg.yaml +0 -49
  41. bplusplus/yolov5detect/models/tf.py +0 -797
  42. bplusplus/yolov5detect/models/yolo.py +0 -495
  43. bplusplus/yolov5detect/models/yolov5l.yaml +0 -49
  44. bplusplus/yolov5detect/models/yolov5m.yaml +0 -49
  45. bplusplus/yolov5detect/models/yolov5n.yaml +0 -49
  46. bplusplus/yolov5detect/models/yolov5s.yaml +0 -49
  47. bplusplus/yolov5detect/models/yolov5x.yaml +0 -49
  48. bplusplus/yolov5detect/utils/__init__.py +0 -97
  49. bplusplus/yolov5detect/utils/activations.py +0 -134
  50. bplusplus/yolov5detect/utils/augmentations.py +0 -448
  51. bplusplus/yolov5detect/utils/autoanchor.py +0 -175
  52. bplusplus/yolov5detect/utils/autobatch.py +0 -70
  53. bplusplus/yolov5detect/utils/aws/__init__.py +0 -0
  54. bplusplus/yolov5detect/utils/aws/mime.sh +0 -26
  55. bplusplus/yolov5detect/utils/aws/resume.py +0 -41
  56. bplusplus/yolov5detect/utils/aws/userdata.sh +0 -27
  57. bplusplus/yolov5detect/utils/callbacks.py +0 -72
  58. bplusplus/yolov5detect/utils/dataloaders.py +0 -1385
  59. bplusplus/yolov5detect/utils/docker/Dockerfile +0 -73
  60. bplusplus/yolov5detect/utils/docker/Dockerfile-arm64 +0 -40
  61. bplusplus/yolov5detect/utils/docker/Dockerfile-cpu +0 -42
  62. bplusplus/yolov5detect/utils/downloads.py +0 -136
  63. bplusplus/yolov5detect/utils/flask_rest_api/README.md +0 -70
  64. bplusplus/yolov5detect/utils/flask_rest_api/example_request.py +0 -17
  65. bplusplus/yolov5detect/utils/flask_rest_api/restapi.py +0 -49
  66. bplusplus/yolov5detect/utils/general.py +0 -1294
  67. bplusplus/yolov5detect/utils/google_app_engine/Dockerfile +0 -25
  68. bplusplus/yolov5detect/utils/google_app_engine/additional_requirements.txt +0 -6
  69. bplusplus/yolov5detect/utils/google_app_engine/app.yaml +0 -16
  70. bplusplus/yolov5detect/utils/loggers/__init__.py +0 -476
  71. bplusplus/yolov5detect/utils/loggers/clearml/README.md +0 -222
  72. bplusplus/yolov5detect/utils/loggers/clearml/__init__.py +0 -0
  73. bplusplus/yolov5detect/utils/loggers/clearml/clearml_utils.py +0 -230
  74. bplusplus/yolov5detect/utils/loggers/clearml/hpo.py +0 -90
  75. bplusplus/yolov5detect/utils/loggers/comet/README.md +0 -250
  76. bplusplus/yolov5detect/utils/loggers/comet/__init__.py +0 -551
  77. bplusplus/yolov5detect/utils/loggers/comet/comet_utils.py +0 -151
  78. bplusplus/yolov5detect/utils/loggers/comet/hpo.py +0 -126
  79. bplusplus/yolov5detect/utils/loggers/comet/optimizer_config.json +0 -135
  80. bplusplus/yolov5detect/utils/loggers/wandb/__init__.py +0 -0
  81. bplusplus/yolov5detect/utils/loggers/wandb/wandb_utils.py +0 -210
  82. bplusplus/yolov5detect/utils/loss.py +0 -259
  83. bplusplus/yolov5detect/utils/metrics.py +0 -381
  84. bplusplus/yolov5detect/utils/plots.py +0 -517
  85. bplusplus/yolov5detect/utils/segment/__init__.py +0 -0
  86. bplusplus/yolov5detect/utils/segment/augmentations.py +0 -100
  87. bplusplus/yolov5detect/utils/segment/dataloaders.py +0 -366
  88. bplusplus/yolov5detect/utils/segment/general.py +0 -160
  89. bplusplus/yolov5detect/utils/segment/loss.py +0 -198
  90. bplusplus/yolov5detect/utils/segment/metrics.py +0 -225
  91. bplusplus/yolov5detect/utils/segment/plots.py +0 -152
  92. bplusplus/yolov5detect/utils/torch_utils.py +0 -482
  93. bplusplus/yolov5detect/utils/triton.py +0 -90
  94. bplusplus-1.1.0.dist-info/METADATA +0 -179
  95. bplusplus-1.1.0.dist-info/RECORD +0 -92
  96. {bplusplus-1.1.0.dist-info → bplusplus-1.2.0.dist-info}/LICENSE +0 -0
  97. {bplusplus-1.1.0.dist-info → bplusplus-1.2.0.dist-info}/WHEEL +0 -0
@@ -1,551 +0,0 @@
1
- # Ultralytics YOLOv5 🚀, AGPL-3.0 license
2
-
3
- import glob
4
- import json
5
- import logging
6
- import os
7
- import sys
8
- from pathlib import Path
9
-
10
- logger = logging.getLogger(__name__)
11
-
12
- FILE = Path(__file__).resolve()
13
- ROOT = FILE.parents[3] # YOLOv5 root directory
14
- if str(ROOT) not in sys.path:
15
- sys.path.append(str(ROOT)) # add ROOT to PATH
16
-
17
- try:
18
- import comet_ml
19
-
20
- # Project Configuration
21
- config = comet_ml.config.get_config()
22
- COMET_PROJECT_NAME = config.get_string(os.getenv("COMET_PROJECT_NAME"), "comet.project_name", default="yolov5")
23
- except ImportError:
24
- comet_ml = None
25
- COMET_PROJECT_NAME = None
26
-
27
- import PIL
28
- import torch
29
- import torchvision.transforms as T
30
- import yaml
31
-
32
- from utils.dataloaders import img2label_paths
33
- from utils.general import check_dataset, scale_boxes, xywh2xyxy
34
- from utils.metrics import box_iou
35
-
36
- COMET_PREFIX = "comet://"
37
-
38
- COMET_MODE = os.getenv("COMET_MODE", "online")
39
-
40
- # Model Saving Settings
41
- COMET_MODEL_NAME = os.getenv("COMET_MODEL_NAME", "yolov5")
42
-
43
- # Dataset Artifact Settings
44
- COMET_UPLOAD_DATASET = os.getenv("COMET_UPLOAD_DATASET", "false").lower() == "true"
45
-
46
- # Evaluation Settings
47
- COMET_LOG_CONFUSION_MATRIX = os.getenv("COMET_LOG_CONFUSION_MATRIX", "true").lower() == "true"
48
- COMET_LOG_PREDICTIONS = os.getenv("COMET_LOG_PREDICTIONS", "true").lower() == "true"
49
- COMET_MAX_IMAGE_UPLOADS = int(os.getenv("COMET_MAX_IMAGE_UPLOADS", 100))
50
-
51
- # Confusion Matrix Settings
52
- CONF_THRES = float(os.getenv("CONF_THRES", 0.001))
53
- IOU_THRES = float(os.getenv("IOU_THRES", 0.6))
54
-
55
- # Batch Logging Settings
56
- COMET_LOG_BATCH_METRICS = os.getenv("COMET_LOG_BATCH_METRICS", "false").lower() == "true"
57
- COMET_BATCH_LOGGING_INTERVAL = os.getenv("COMET_BATCH_LOGGING_INTERVAL", 1)
58
- COMET_PREDICTION_LOGGING_INTERVAL = os.getenv("COMET_PREDICTION_LOGGING_INTERVAL", 1)
59
- COMET_LOG_PER_CLASS_METRICS = os.getenv("COMET_LOG_PER_CLASS_METRICS", "false").lower() == "true"
60
-
61
- RANK = int(os.getenv("RANK", -1))
62
-
63
- to_pil = T.ToPILImage()
64
-
65
-
66
- class CometLogger:
67
- """Log metrics, parameters, source code, models and much more with Comet."""
68
-
69
- def __init__(self, opt, hyp, run_id=None, job_type="Training", **experiment_kwargs) -> None:
70
- """Initializes CometLogger with given options, hyperparameters, run ID, job type, and additional experiment
71
- arguments.
72
- """
73
- self.job_type = job_type
74
- self.opt = opt
75
- self.hyp = hyp
76
-
77
- # Comet Flags
78
- self.comet_mode = COMET_MODE
79
-
80
- self.save_model = opt.save_period > -1
81
- self.model_name = COMET_MODEL_NAME
82
-
83
- # Batch Logging Settings
84
- self.log_batch_metrics = COMET_LOG_BATCH_METRICS
85
- self.comet_log_batch_interval = COMET_BATCH_LOGGING_INTERVAL
86
-
87
- # Dataset Artifact Settings
88
- self.upload_dataset = self.opt.upload_dataset or COMET_UPLOAD_DATASET
89
- self.resume = self.opt.resume
90
-
91
- # Default parameters to pass to Experiment objects
92
- self.default_experiment_kwargs = {
93
- "log_code": False,
94
- "log_env_gpu": True,
95
- "log_env_cpu": True,
96
- "project_name": COMET_PROJECT_NAME,
97
- }
98
- self.default_experiment_kwargs.update(experiment_kwargs)
99
- self.experiment = self._get_experiment(self.comet_mode, run_id)
100
- self.experiment.set_name(self.opt.name)
101
-
102
- self.data_dict = self.check_dataset(self.opt.data)
103
- self.class_names = self.data_dict["names"]
104
- self.num_classes = self.data_dict["nc"]
105
-
106
- self.logged_images_count = 0
107
- self.max_images = COMET_MAX_IMAGE_UPLOADS
108
-
109
- if run_id is None:
110
- self.experiment.log_other("Created from", "YOLOv5")
111
- if not isinstance(self.experiment, comet_ml.OfflineExperiment):
112
- workspace, project_name, experiment_id = self.experiment.url.split("/")[-3:]
113
- self.experiment.log_other(
114
- "Run Path",
115
- f"{workspace}/{project_name}/{experiment_id}",
116
- )
117
- self.log_parameters(vars(opt))
118
- self.log_parameters(self.opt.hyp)
119
- self.log_asset_data(
120
- self.opt.hyp,
121
- name="hyperparameters.json",
122
- metadata={"type": "hyp-config-file"},
123
- )
124
- self.log_asset(
125
- f"{self.opt.save_dir}/opt.yaml",
126
- metadata={"type": "opt-config-file"},
127
- )
128
-
129
- self.comet_log_confusion_matrix = COMET_LOG_CONFUSION_MATRIX
130
-
131
- if hasattr(self.opt, "conf_thres"):
132
- self.conf_thres = self.opt.conf_thres
133
- else:
134
- self.conf_thres = CONF_THRES
135
- if hasattr(self.opt, "iou_thres"):
136
- self.iou_thres = self.opt.iou_thres
137
- else:
138
- self.iou_thres = IOU_THRES
139
-
140
- self.log_parameters({"val_iou_threshold": self.iou_thres, "val_conf_threshold": self.conf_thres})
141
-
142
- self.comet_log_predictions = COMET_LOG_PREDICTIONS
143
- if self.opt.bbox_interval == -1:
144
- self.comet_log_prediction_interval = 1 if self.opt.epochs < 10 else self.opt.epochs // 10
145
- else:
146
- self.comet_log_prediction_interval = self.opt.bbox_interval
147
-
148
- if self.comet_log_predictions:
149
- self.metadata_dict = {}
150
- self.logged_image_names = []
151
-
152
- self.comet_log_per_class_metrics = COMET_LOG_PER_CLASS_METRICS
153
-
154
- self.experiment.log_others(
155
- {
156
- "comet_mode": COMET_MODE,
157
- "comet_max_image_uploads": COMET_MAX_IMAGE_UPLOADS,
158
- "comet_log_per_class_metrics": COMET_LOG_PER_CLASS_METRICS,
159
- "comet_log_batch_metrics": COMET_LOG_BATCH_METRICS,
160
- "comet_log_confusion_matrix": COMET_LOG_CONFUSION_MATRIX,
161
- "comet_model_name": COMET_MODEL_NAME,
162
- }
163
- )
164
-
165
- # Check if running the Experiment with the Comet Optimizer
166
- if hasattr(self.opt, "comet_optimizer_id"):
167
- self.experiment.log_other("optimizer_id", self.opt.comet_optimizer_id)
168
- self.experiment.log_other("optimizer_objective", self.opt.comet_optimizer_objective)
169
- self.experiment.log_other("optimizer_metric", self.opt.comet_optimizer_metric)
170
- self.experiment.log_other("optimizer_parameters", json.dumps(self.hyp))
171
-
172
- def _get_experiment(self, mode, experiment_id=None):
173
- """Returns a new or existing Comet.ml experiment based on mode and optional experiment_id."""
174
- if mode == "offline":
175
- return (
176
- comet_ml.ExistingOfflineExperiment(
177
- previous_experiment=experiment_id,
178
- **self.default_experiment_kwargs,
179
- )
180
- if experiment_id is not None
181
- else comet_ml.OfflineExperiment(
182
- **self.default_experiment_kwargs,
183
- )
184
- )
185
- try:
186
- if experiment_id is not None:
187
- return comet_ml.ExistingExperiment(
188
- previous_experiment=experiment_id,
189
- **self.default_experiment_kwargs,
190
- )
191
-
192
- return comet_ml.Experiment(**self.default_experiment_kwargs)
193
-
194
- except ValueError:
195
- logger.warning(
196
- "COMET WARNING: "
197
- "Comet credentials have not been set. "
198
- "Comet will default to offline logging. "
199
- "Please set your credentials to enable online logging."
200
- )
201
- return self._get_experiment("offline", experiment_id)
202
-
203
- return
204
-
205
- def log_metrics(self, log_dict, **kwargs):
206
- """Logs metrics to the current experiment, accepting a dictionary of metric names and values."""
207
- self.experiment.log_metrics(log_dict, **kwargs)
208
-
209
- def log_parameters(self, log_dict, **kwargs):
210
- """Logs parameters to the current experiment, accepting a dictionary of parameter names and values."""
211
- self.experiment.log_parameters(log_dict, **kwargs)
212
-
213
- def log_asset(self, asset_path, **kwargs):
214
- """Logs a file or directory as an asset to the current experiment."""
215
- self.experiment.log_asset(asset_path, **kwargs)
216
-
217
- def log_asset_data(self, asset, **kwargs):
218
- """Logs in-memory data as an asset to the current experiment, with optional kwargs."""
219
- self.experiment.log_asset_data(asset, **kwargs)
220
-
221
- def log_image(self, img, **kwargs):
222
- """Logs an image to the current experiment with optional kwargs."""
223
- self.experiment.log_image(img, **kwargs)
224
-
225
- def log_model(self, path, opt, epoch, fitness_score, best_model=False):
226
- """Logs model checkpoint to experiment with path, options, epoch, fitness, and best model flag."""
227
- if not self.save_model:
228
- return
229
-
230
- model_metadata = {
231
- "fitness_score": fitness_score[-1],
232
- "epochs_trained": epoch + 1,
233
- "save_period": opt.save_period,
234
- "total_epochs": opt.epochs,
235
- }
236
-
237
- model_files = glob.glob(f"{path}/*.pt")
238
- for model_path in model_files:
239
- name = Path(model_path).name
240
-
241
- self.experiment.log_model(
242
- self.model_name,
243
- file_or_folder=model_path,
244
- file_name=name,
245
- metadata=model_metadata,
246
- overwrite=True,
247
- )
248
-
249
- def check_dataset(self, data_file):
250
- """Validates the dataset configuration by loading the YAML file specified in `data_file`."""
251
- with open(data_file) as f:
252
- data_config = yaml.safe_load(f)
253
-
254
- path = data_config.get("path")
255
- if path and path.startswith(COMET_PREFIX):
256
- path = data_config["path"].replace(COMET_PREFIX, "")
257
- return self.download_dataset_artifact(path)
258
- self.log_asset(self.opt.data, metadata={"type": "data-config-file"})
259
-
260
- return check_dataset(data_file)
261
-
262
- def log_predictions(self, image, labelsn, path, shape, predn):
263
- """Logs predictions with IOU filtering, given image, labels, path, shape, and predictions."""
264
- if self.logged_images_count >= self.max_images:
265
- return
266
- detections = predn[predn[:, 4] > self.conf_thres]
267
- iou = box_iou(labelsn[:, 1:], detections[:, :4])
268
- mask, _ = torch.where(iou > self.iou_thres)
269
- if len(mask) == 0:
270
- return
271
-
272
- filtered_detections = detections[mask]
273
- filtered_labels = labelsn[mask]
274
-
275
- image_id = path.split("/")[-1].split(".")[0]
276
- image_name = f"{image_id}_curr_epoch_{self.experiment.curr_epoch}"
277
- if image_name not in self.logged_image_names:
278
- native_scale_image = PIL.Image.open(path)
279
- self.log_image(native_scale_image, name=image_name)
280
- self.logged_image_names.append(image_name)
281
-
282
- metadata = [
283
- {
284
- "label": f"{self.class_names[int(cls)]}-gt",
285
- "score": 100,
286
- "box": {"x": xyxy[0], "y": xyxy[1], "x2": xyxy[2], "y2": xyxy[3]},
287
- }
288
- for cls, *xyxy in filtered_labels.tolist()
289
- ]
290
- metadata.extend(
291
- {
292
- "label": f"{self.class_names[int(cls)]}",
293
- "score": conf * 100,
294
- "box": {"x": xyxy[0], "y": xyxy[1], "x2": xyxy[2], "y2": xyxy[3]},
295
- }
296
- for *xyxy, conf, cls in filtered_detections.tolist()
297
- )
298
- self.metadata_dict[image_name] = metadata
299
- self.logged_images_count += 1
300
-
301
- return
302
-
303
- def preprocess_prediction(self, image, labels, shape, pred):
304
- """Processes prediction data, resizing labels and adding dataset metadata."""
305
- nl, _ = labels.shape[0], pred.shape[0]
306
-
307
- # Predictions
308
- if self.opt.single_cls:
309
- pred[:, 5] = 0
310
-
311
- predn = pred.clone()
312
- scale_boxes(image.shape[1:], predn[:, :4], shape[0], shape[1])
313
-
314
- labelsn = None
315
- if nl:
316
- tbox = xywh2xyxy(labels[:, 1:5]) # target boxes
317
- scale_boxes(image.shape[1:], tbox, shape[0], shape[1]) # native-space labels
318
- labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels
319
- scale_boxes(image.shape[1:], predn[:, :4], shape[0], shape[1]) # native-space pred
320
-
321
- return predn, labelsn
322
-
323
- def add_assets_to_artifact(self, artifact, path, asset_path, split):
324
- """Adds image and label assets to a wandb artifact given dataset split and paths."""
325
- img_paths = sorted(glob.glob(f"{asset_path}/*"))
326
- label_paths = img2label_paths(img_paths)
327
-
328
- for image_file, label_file in zip(img_paths, label_paths):
329
- image_logical_path, label_logical_path = map(lambda x: os.path.relpath(x, path), [image_file, label_file])
330
-
331
- try:
332
- artifact.add(
333
- image_file,
334
- logical_path=image_logical_path,
335
- metadata={"split": split},
336
- )
337
- artifact.add(
338
- label_file,
339
- logical_path=label_logical_path,
340
- metadata={"split": split},
341
- )
342
- except ValueError as e:
343
- logger.error("COMET ERROR: Error adding file to Artifact. Skipping file.")
344
- logger.error(f"COMET ERROR: {e}")
345
- continue
346
-
347
- return artifact
348
-
349
- def upload_dataset_artifact(self):
350
- """Uploads a YOLOv5 dataset as an artifact to the Comet.ml platform."""
351
- dataset_name = self.data_dict.get("dataset_name", "yolov5-dataset")
352
- path = str((ROOT / Path(self.data_dict["path"])).resolve())
353
-
354
- metadata = self.data_dict.copy()
355
- for key in ["train", "val", "test"]:
356
- split_path = metadata.get(key)
357
- if split_path is not None:
358
- metadata[key] = split_path.replace(path, "")
359
-
360
- artifact = comet_ml.Artifact(name=dataset_name, artifact_type="dataset", metadata=metadata)
361
- for key in metadata.keys():
362
- if key in ["train", "val", "test"]:
363
- if isinstance(self.upload_dataset, str) and (key != self.upload_dataset):
364
- continue
365
-
366
- asset_path = self.data_dict.get(key)
367
- if asset_path is not None:
368
- artifact = self.add_assets_to_artifact(artifact, path, asset_path, key)
369
-
370
- self.experiment.log_artifact(artifact)
371
-
372
- return
373
-
374
- def download_dataset_artifact(self, artifact_path):
375
- """Downloads a dataset artifact to a specified directory using the experiment's logged artifact."""
376
- logged_artifact = self.experiment.get_artifact(artifact_path)
377
- artifact_save_dir = str(Path(self.opt.save_dir) / logged_artifact.name)
378
- logged_artifact.download(artifact_save_dir)
379
-
380
- metadata = logged_artifact.metadata
381
- data_dict = metadata.copy()
382
- data_dict["path"] = artifact_save_dir
383
-
384
- metadata_names = metadata.get("names")
385
- if isinstance(metadata_names, dict):
386
- data_dict["names"] = {int(k): v for k, v in metadata.get("names").items()}
387
- elif isinstance(metadata_names, list):
388
- data_dict["names"] = {int(k): v for k, v in zip(range(len(metadata_names)), metadata_names)}
389
- else:
390
- raise "Invalid 'names' field in dataset yaml file. Please use a list or dictionary"
391
-
392
- return self.update_data_paths(data_dict)
393
-
394
- def update_data_paths(self, data_dict):
395
- """Updates data paths in the dataset dictionary, defaulting 'path' to an empty string if not present."""
396
- path = data_dict.get("path", "")
397
-
398
- for split in ["train", "val", "test"]:
399
- if data_dict.get(split):
400
- split_path = data_dict.get(split)
401
- data_dict[split] = (
402
- f"{path}/{split_path}" if isinstance(split, str) else [f"{path}/{x}" for x in split_path]
403
- )
404
-
405
- return data_dict
406
-
407
- def on_pretrain_routine_end(self, paths):
408
- """Called at the end of pretraining routine to handle paths if training is not being resumed."""
409
- if self.opt.resume:
410
- return
411
-
412
- for path in paths:
413
- self.log_asset(str(path))
414
-
415
- if self.upload_dataset and not self.resume:
416
- self.upload_dataset_artifact()
417
-
418
- return
419
-
420
- def on_train_start(self):
421
- """Logs hyperparameters at the start of training."""
422
- self.log_parameters(self.hyp)
423
-
424
- def on_train_epoch_start(self):
425
- """Called at the start of each training epoch."""
426
- return
427
-
428
- def on_train_epoch_end(self, epoch):
429
- """Updates the current epoch in the experiment tracking at the end of each epoch."""
430
- self.experiment.curr_epoch = epoch
431
-
432
- return
433
-
434
- def on_train_batch_start(self):
435
- """Called at the start of each training batch."""
436
- return
437
-
438
- def on_train_batch_end(self, log_dict, step):
439
- """Callback function that updates and logs metrics at the end of each training batch if conditions are met."""
440
- self.experiment.curr_step = step
441
- if self.log_batch_metrics and (step % self.comet_log_batch_interval == 0):
442
- self.log_metrics(log_dict, step=step)
443
-
444
- return
445
-
446
- def on_train_end(self, files, save_dir, last, best, epoch, results):
447
- """Logs metadata and optionally saves model files at the end of training."""
448
- if self.comet_log_predictions:
449
- curr_epoch = self.experiment.curr_epoch
450
- self.experiment.log_asset_data(self.metadata_dict, "image-metadata.json", epoch=curr_epoch)
451
-
452
- for f in files:
453
- self.log_asset(f, metadata={"epoch": epoch})
454
- self.log_asset(f"{save_dir}/results.csv", metadata={"epoch": epoch})
455
-
456
- if not self.opt.evolve:
457
- model_path = str(best if best.exists() else last)
458
- name = Path(model_path).name
459
- if self.save_model:
460
- self.experiment.log_model(
461
- self.model_name,
462
- file_or_folder=model_path,
463
- file_name=name,
464
- overwrite=True,
465
- )
466
-
467
- # Check if running Experiment with Comet Optimizer
468
- if hasattr(self.opt, "comet_optimizer_id"):
469
- metric = results.get(self.opt.comet_optimizer_metric)
470
- self.experiment.log_other("optimizer_metric_value", metric)
471
-
472
- self.finish_run()
473
-
474
- def on_val_start(self):
475
- """Called at the start of validation, currently a placeholder with no functionality."""
476
- return
477
-
478
- def on_val_batch_start(self):
479
- """Placeholder called at the start of a validation batch with no current functionality."""
480
- return
481
-
482
- def on_val_batch_end(self, batch_i, images, targets, paths, shapes, outputs):
483
- """Callback executed at the end of a validation batch, conditionally logs predictions to Comet ML."""
484
- if not (self.comet_log_predictions and ((batch_i + 1) % self.comet_log_prediction_interval == 0)):
485
- return
486
-
487
- for si, pred in enumerate(outputs):
488
- if len(pred) == 0:
489
- continue
490
-
491
- image = images[si]
492
- labels = targets[targets[:, 0] == si, 1:]
493
- shape = shapes[si]
494
- path = paths[si]
495
- predn, labelsn = self.preprocess_prediction(image, labels, shape, pred)
496
- if labelsn is not None:
497
- self.log_predictions(image, labelsn, path, shape, predn)
498
-
499
- return
500
-
501
- def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix):
502
- """Logs per-class metrics to Comet.ml after validation if enabled and more than one class exists."""
503
- if self.comet_log_per_class_metrics and self.num_classes > 1:
504
- for i, c in enumerate(ap_class):
505
- class_name = self.class_names[c]
506
- self.experiment.log_metrics(
507
- {
508
- "mAP@.5": ap50[i],
509
- "mAP@.5:.95": ap[i],
510
- "precision": p[i],
511
- "recall": r[i],
512
- "f1": f1[i],
513
- "true_positives": tp[i],
514
- "false_positives": fp[i],
515
- "support": nt[c],
516
- },
517
- prefix=class_name,
518
- )
519
-
520
- if self.comet_log_confusion_matrix:
521
- epoch = self.experiment.curr_epoch
522
- class_names = list(self.class_names.values())
523
- class_names.append("background")
524
- num_classes = len(class_names)
525
-
526
- self.experiment.log_confusion_matrix(
527
- matrix=confusion_matrix.matrix,
528
- max_categories=num_classes,
529
- labels=class_names,
530
- epoch=epoch,
531
- column_label="Actual Category",
532
- row_label="Predicted Category",
533
- file_name=f"confusion-matrix-epoch-{epoch}.json",
534
- )
535
-
536
- def on_fit_epoch_end(self, result, epoch):
537
- """Logs metrics at the end of each training epoch."""
538
- self.log_metrics(result, epoch=epoch)
539
-
540
- def on_model_save(self, last, epoch, final_epoch, best_fitness, fi):
541
- """Callback to save model checkpoints periodically if conditions are met."""
542
- if ((epoch + 1) % self.opt.save_period == 0 and not final_epoch) and self.opt.save_period != -1:
543
- self.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi)
544
-
545
- def on_params_update(self, params):
546
- """Logs updated parameters during training."""
547
- self.log_parameters(params)
548
-
549
- def finish_run(self):
550
- """Ends the current experiment and logs its completion."""
551
- self.experiment.end()
@@ -1,151 +0,0 @@
1
- # Ultralytics YOLOv5 🚀, AGPL-3.0 license
2
-
3
- import logging
4
- import os
5
- from urllib.parse import urlparse
6
-
7
- try:
8
- import comet_ml
9
- except ImportError:
10
- comet_ml = None
11
-
12
- import yaml
13
-
14
- logger = logging.getLogger(__name__)
15
-
16
- COMET_PREFIX = "comet://"
17
- COMET_MODEL_NAME = os.getenv("COMET_MODEL_NAME", "yolov5")
18
- COMET_DEFAULT_CHECKPOINT_FILENAME = os.getenv("COMET_DEFAULT_CHECKPOINT_FILENAME", "last.pt")
19
-
20
-
21
- def download_model_checkpoint(opt, experiment):
22
- """Downloads YOLOv5 model checkpoint from Comet ML experiment, updating `opt.weights` with download path."""
23
- model_dir = f"{opt.project}/{experiment.name}"
24
- os.makedirs(model_dir, exist_ok=True)
25
-
26
- model_name = COMET_MODEL_NAME
27
- model_asset_list = experiment.get_model_asset_list(model_name)
28
-
29
- if len(model_asset_list) == 0:
30
- logger.error(f"COMET ERROR: No checkpoints found for model name : {model_name}")
31
- return
32
-
33
- model_asset_list = sorted(
34
- model_asset_list,
35
- key=lambda x: x["step"],
36
- reverse=True,
37
- )
38
- logged_checkpoint_map = {asset["fileName"]: asset["assetId"] for asset in model_asset_list}
39
-
40
- resource_url = urlparse(opt.weights)
41
- checkpoint_filename = resource_url.query
42
-
43
- if checkpoint_filename:
44
- asset_id = logged_checkpoint_map.get(checkpoint_filename)
45
- else:
46
- asset_id = logged_checkpoint_map.get(COMET_DEFAULT_CHECKPOINT_FILENAME)
47
- checkpoint_filename = COMET_DEFAULT_CHECKPOINT_FILENAME
48
-
49
- if asset_id is None:
50
- logger.error(f"COMET ERROR: Checkpoint {checkpoint_filename} not found in the given Experiment")
51
- return
52
-
53
- try:
54
- logger.info(f"COMET INFO: Downloading checkpoint {checkpoint_filename}")
55
- asset_filename = checkpoint_filename
56
-
57
- model_binary = experiment.get_asset(asset_id, return_type="binary", stream=False)
58
- model_download_path = f"{model_dir}/{asset_filename}"
59
- with open(model_download_path, "wb") as f:
60
- f.write(model_binary)
61
-
62
- opt.weights = model_download_path
63
-
64
- except Exception as e:
65
- logger.warning("COMET WARNING: Unable to download checkpoint from Comet")
66
- logger.exception(e)
67
-
68
-
69
- def set_opt_parameters(opt, experiment):
70
- """
71
- Update the opts Namespace with parameters from Comet's ExistingExperiment when resuming a run.
72
-
73
- Args:
74
- opt (argparse.Namespace): Namespace of command line options
75
- experiment (comet_ml.APIExperiment): Comet API Experiment object
76
- """
77
- asset_list = experiment.get_asset_list()
78
- resume_string = opt.resume
79
-
80
- for asset in asset_list:
81
- if asset["fileName"] == "opt.yaml":
82
- asset_id = asset["assetId"]
83
- asset_binary = experiment.get_asset(asset_id, return_type="binary", stream=False)
84
- opt_dict = yaml.safe_load(asset_binary)
85
- for key, value in opt_dict.items():
86
- setattr(opt, key, value)
87
- opt.resume = resume_string
88
-
89
- # Save hyperparameters to YAML file
90
- # Necessary to pass checks in training script
91
- save_dir = f"{opt.project}/{experiment.name}"
92
- os.makedirs(save_dir, exist_ok=True)
93
-
94
- hyp_yaml_path = f"{save_dir}/hyp.yaml"
95
- with open(hyp_yaml_path, "w") as f:
96
- yaml.dump(opt.hyp, f)
97
- opt.hyp = hyp_yaml_path
98
-
99
-
100
- def check_comet_weights(opt):
101
- """
102
- Downloads model weights from Comet and updates the weights path to point to saved weights location.
103
-
104
- Args:
105
- opt (argparse.Namespace): Command Line arguments passed
106
- to YOLOv5 training script
107
-
108
- Returns:
109
- None/bool: Return True if weights are successfully downloaded
110
- else return None
111
- """
112
- if comet_ml is None:
113
- return
114
-
115
- if isinstance(opt.weights, str) and opt.weights.startswith(COMET_PREFIX):
116
- api = comet_ml.API()
117
- resource = urlparse(opt.weights)
118
- experiment_path = f"{resource.netloc}{resource.path}"
119
- experiment = api.get(experiment_path)
120
- download_model_checkpoint(opt, experiment)
121
- return True
122
-
123
- return None
124
-
125
-
126
- def check_comet_resume(opt):
127
- """
128
- Restores run parameters to its original state based on the model checkpoint and logged Experiment parameters.
129
-
130
- Args:
131
- opt (argparse.Namespace): Command Line arguments passed
132
- to YOLOv5 training script
133
-
134
- Returns:
135
- None/bool: Return True if the run is restored successfully
136
- else return None
137
- """
138
- if comet_ml is None:
139
- return
140
-
141
- if isinstance(opt.resume, str) and opt.resume.startswith(COMET_PREFIX):
142
- api = comet_ml.API()
143
- resource = urlparse(opt.resume)
144
- experiment_path = f"{resource.netloc}{resource.path}"
145
- experiment = api.get(experiment_path)
146
- set_opt_parameters(opt, experiment)
147
- download_model_checkpoint(opt, experiment)
148
-
149
- return True
150
-
151
- return None