bplusplus 1.1.0__py3-none-any.whl → 1.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of bplusplus might be problematic.
Files changed (97)
  1. bplusplus/__init__.py +4 -2
  2. bplusplus/collect.py +69 -5
  3. bplusplus/hierarchical/test.py +670 -0
  4. bplusplus/hierarchical/train.py +676 -0
  5. bplusplus/prepare.py +228 -64
  6. bplusplus/resnet/test.py +473 -0
  7. bplusplus/resnet/train.py +329 -0
  8. bplusplus-1.2.0.dist-info/METADATA +249 -0
  9. bplusplus-1.2.0.dist-info/RECORD +12 -0
  10. bplusplus/yolov5detect/__init__.py +0 -1
  11. bplusplus/yolov5detect/detect.py +0 -444
  12. bplusplus/yolov5detect/export.py +0 -1530
  13. bplusplus/yolov5detect/insect.yaml +0 -8
  14. bplusplus/yolov5detect/models/__init__.py +0 -0
  15. bplusplus/yolov5detect/models/common.py +0 -1109
  16. bplusplus/yolov5detect/models/experimental.py +0 -130
  17. bplusplus/yolov5detect/models/hub/anchors.yaml +0 -56
  18. bplusplus/yolov5detect/models/hub/yolov3-spp.yaml +0 -52
  19. bplusplus/yolov5detect/models/hub/yolov3-tiny.yaml +0 -42
  20. bplusplus/yolov5detect/models/hub/yolov3.yaml +0 -52
  21. bplusplus/yolov5detect/models/hub/yolov5-bifpn.yaml +0 -49
  22. bplusplus/yolov5detect/models/hub/yolov5-fpn.yaml +0 -43
  23. bplusplus/yolov5detect/models/hub/yolov5-p2.yaml +0 -55
  24. bplusplus/yolov5detect/models/hub/yolov5-p34.yaml +0 -42
  25. bplusplus/yolov5detect/models/hub/yolov5-p6.yaml +0 -57
  26. bplusplus/yolov5detect/models/hub/yolov5-p7.yaml +0 -68
  27. bplusplus/yolov5detect/models/hub/yolov5-panet.yaml +0 -49
  28. bplusplus/yolov5detect/models/hub/yolov5l6.yaml +0 -61
  29. bplusplus/yolov5detect/models/hub/yolov5m6.yaml +0 -61
  30. bplusplus/yolov5detect/models/hub/yolov5n6.yaml +0 -61
  31. bplusplus/yolov5detect/models/hub/yolov5s-LeakyReLU.yaml +0 -50
  32. bplusplus/yolov5detect/models/hub/yolov5s-ghost.yaml +0 -49
  33. bplusplus/yolov5detect/models/hub/yolov5s-transformer.yaml +0 -49
  34. bplusplus/yolov5detect/models/hub/yolov5s6.yaml +0 -61
  35. bplusplus/yolov5detect/models/hub/yolov5x6.yaml +0 -61
  36. bplusplus/yolov5detect/models/segment/yolov5l-seg.yaml +0 -49
  37. bplusplus/yolov5detect/models/segment/yolov5m-seg.yaml +0 -49
  38. bplusplus/yolov5detect/models/segment/yolov5n-seg.yaml +0 -49
  39. bplusplus/yolov5detect/models/segment/yolov5s-seg.yaml +0 -49
  40. bplusplus/yolov5detect/models/segment/yolov5x-seg.yaml +0 -49
  41. bplusplus/yolov5detect/models/tf.py +0 -797
  42. bplusplus/yolov5detect/models/yolo.py +0 -495
  43. bplusplus/yolov5detect/models/yolov5l.yaml +0 -49
  44. bplusplus/yolov5detect/models/yolov5m.yaml +0 -49
  45. bplusplus/yolov5detect/models/yolov5n.yaml +0 -49
  46. bplusplus/yolov5detect/models/yolov5s.yaml +0 -49
  47. bplusplus/yolov5detect/models/yolov5x.yaml +0 -49
  48. bplusplus/yolov5detect/utils/__init__.py +0 -97
  49. bplusplus/yolov5detect/utils/activations.py +0 -134
  50. bplusplus/yolov5detect/utils/augmentations.py +0 -448
  51. bplusplus/yolov5detect/utils/autoanchor.py +0 -175
  52. bplusplus/yolov5detect/utils/autobatch.py +0 -70
  53. bplusplus/yolov5detect/utils/aws/__init__.py +0 -0
  54. bplusplus/yolov5detect/utils/aws/mime.sh +0 -26
  55. bplusplus/yolov5detect/utils/aws/resume.py +0 -41
  56. bplusplus/yolov5detect/utils/aws/userdata.sh +0 -27
  57. bplusplus/yolov5detect/utils/callbacks.py +0 -72
  58. bplusplus/yolov5detect/utils/dataloaders.py +0 -1385
  59. bplusplus/yolov5detect/utils/docker/Dockerfile +0 -73
  60. bplusplus/yolov5detect/utils/docker/Dockerfile-arm64 +0 -40
  61. bplusplus/yolov5detect/utils/docker/Dockerfile-cpu +0 -42
  62. bplusplus/yolov5detect/utils/downloads.py +0 -136
  63. bplusplus/yolov5detect/utils/flask_rest_api/README.md +0 -70
  64. bplusplus/yolov5detect/utils/flask_rest_api/example_request.py +0 -17
  65. bplusplus/yolov5detect/utils/flask_rest_api/restapi.py +0 -49
  66. bplusplus/yolov5detect/utils/general.py +0 -1294
  67. bplusplus/yolov5detect/utils/google_app_engine/Dockerfile +0 -25
  68. bplusplus/yolov5detect/utils/google_app_engine/additional_requirements.txt +0 -6
  69. bplusplus/yolov5detect/utils/google_app_engine/app.yaml +0 -16
  70. bplusplus/yolov5detect/utils/loggers/__init__.py +0 -476
  71. bplusplus/yolov5detect/utils/loggers/clearml/README.md +0 -222
  72. bplusplus/yolov5detect/utils/loggers/clearml/__init__.py +0 -0
  73. bplusplus/yolov5detect/utils/loggers/clearml/clearml_utils.py +0 -230
  74. bplusplus/yolov5detect/utils/loggers/clearml/hpo.py +0 -90
  75. bplusplus/yolov5detect/utils/loggers/comet/README.md +0 -250
  76. bplusplus/yolov5detect/utils/loggers/comet/__init__.py +0 -551
  77. bplusplus/yolov5detect/utils/loggers/comet/comet_utils.py +0 -151
  78. bplusplus/yolov5detect/utils/loggers/comet/hpo.py +0 -126
  79. bplusplus/yolov5detect/utils/loggers/comet/optimizer_config.json +0 -135
  80. bplusplus/yolov5detect/utils/loggers/wandb/__init__.py +0 -0
  81. bplusplus/yolov5detect/utils/loggers/wandb/wandb_utils.py +0 -210
  82. bplusplus/yolov5detect/utils/loss.py +0 -259
  83. bplusplus/yolov5detect/utils/metrics.py +0 -381
  84. bplusplus/yolov5detect/utils/plots.py +0 -517
  85. bplusplus/yolov5detect/utils/segment/__init__.py +0 -0
  86. bplusplus/yolov5detect/utils/segment/augmentations.py +0 -100
  87. bplusplus/yolov5detect/utils/segment/dataloaders.py +0 -366
  88. bplusplus/yolov5detect/utils/segment/general.py +0 -160
  89. bplusplus/yolov5detect/utils/segment/loss.py +0 -198
  90. bplusplus/yolov5detect/utils/segment/metrics.py +0 -225
  91. bplusplus/yolov5detect/utils/segment/plots.py +0 -152
  92. bplusplus/yolov5detect/utils/torch_utils.py +0 -482
  93. bplusplus/yolov5detect/utils/triton.py +0 -90
  94. bplusplus-1.1.0.dist-info/METADATA +0 -179
  95. bplusplus-1.1.0.dist-info/RECORD +0 -92
  96. {bplusplus-1.1.0.dist-info → bplusplus-1.2.0.dist-info}/LICENSE +0 -0
  97. {bplusplus-1.1.0.dist-info → bplusplus-1.2.0.dist-info}/WHEEL +0 -0
bplusplus/prepare.py CHANGED
@@ -5,7 +5,7 @@ import requests
 import tempfile
 from .collect import Group, collect
 from pathlib import Path
-from .yolov5detect.detect import run
+from ultralytics import YOLO
 import shutil
 from PIL import Image, ImageDraw, ImageFont
 from collections import defaultdict
@@ -14,8 +14,27 @@ import matplotlib.pyplot as plt
 import requests
 from tqdm import tqdm
 import yaml
-
-def prepare(input_directory: str, output_directory: str, with_background: bool = False):
+import torch
+from torch import serialization
+from ultralytics.nn.tasks import DetectionModel
+from torch.nn.modules.container import Sequential
+from ultralytics.nn.modules.conv import Conv
+from torch.nn.modules.conv import Conv2d
+# Add more modules to prevent further errors
+from torch.nn.modules.batchnorm import BatchNorm2d
+from torch.nn.modules.activation import SiLU, ReLU, LeakyReLU
+from torch.nn.modules.pooling import MaxPool2d
+from torch.nn.modules.linear import Linear
+from torch.nn.modules.dropout import Dropout
+from torch.nn.modules.upsampling import Upsample
+from torch.nn import Module, ModuleList, ModuleDict
+from ultralytics.nn.modules import (
+    Bottleneck, C2f, SPPF, Detect, Concat
+)
+from ultralytics.nn.modules.block import DFL
+import numpy as np
+
+def prepare(input_directory: str, output_directory: str, one_stage: bool = False, with_background: bool = False, size_filter: bool = False, sizes: list = None):
 
     """
     Prepares the dataset for training by performing the following steps:
@@ -43,12 +62,8 @@ def prepare(input_directory: str, output_directory: str, with_background: bool = False):
 
     temp_dir_path = Path(temp_dir)
     images_path = temp_dir_path / "images"
-    inference_path = temp_dir_path / "inference"
-    labels_path = temp_dir_path / "labels"
 
     images_path.mkdir(parents=True, exist_ok=True)
-    inference_path.mkdir(parents=True, exist_ok=True)
-    labels_path.mkdir(parents=True, exist_ok=True)
 
     for folder_directory in input_directory.iterdir():
         images_names = []
@@ -66,49 +81,221 @@ def prepare(input_directory: str, output_directory: str, with_background: bool = False):
     __delete_corrupted_images(images_path)
 
     current_dir = Path(__file__).resolve().parent
-    yaml_path = current_dir / 'yolov5detect' / 'insect.yaml'
-    weights_path = current_dir / 'yolov5detect' / 'acc94.pt'
 
-    github_release_url = 'https://github.com/Tvenver/Bplusplus/releases/download/v0.1.2/acc94.pt'
+    weights_path = current_dir / 'small-generic.pt'
+
+    github_release_url = 'https://github.com/orlandocloss/TwoStageInsectDetection/releases/download/models/small-generic.pt'
 
     if not weights_path.exists():
         __download_file_from_github_release(github_release_url, weights_path)
 
-    run(source=images_path, data=yaml_path, weights=weights_path, save_txt=True, project=temp_dir_path)
+    # Add all required classes to safe globals
+    serialization.add_safe_globals([
+        DetectionModel, Sequential, Conv, Conv2d, BatchNorm2d,
+        SiLU, ReLU, LeakyReLU, MaxPool2d, Linear, Dropout, Upsample,
+        Module, ModuleList, ModuleDict,
+        Bottleneck, C2f, SPPF, Detect, Concat, DFL
+    ])
+
+    model = YOLO(weights_path)
+    model.predict(images_path, conf=0.25, save=True, save_txt=True, project=temp_dir_path)
+    labels_path = temp_dir_path / "predict" / "labels"
+
+    if size_filter and len(sizes) <= 2:
+        filtered=filter_by_size(images_path, labels_path, sizes)
+        print(f"\nFiltered {len(list(images_path.glob('*.jpg')))} images by size out of {original_image_count} input images.\n NOTE: Some images may be filtered due to corruption or inaccurate labels.")
+
+    if one_stage:
+
+        __delete_orphaned_images_and_inferences(images_path, labels_path)
+        __delete_invalid_txt_files(images_path, labels_path)
+        class_idxs = update_labels(class_mapping, labels_path)
+        __split_data(class_mapping, temp_dir_path, output_directory)
+
+        # __save_class_idx_to_file(class_idxs, output_directory)
+        final_image_count = count_images_across_splits(output_directory)
+        print(f"\nOut of {original_image_count} input images, {final_image_count} are eligible for detection. \nThese are saved across train, test and valid split in {output_directory}.")
+        __generate_sample_images_with_detections(output_directory, class_idxs)
+
+        if with_background:
+            print("\nCollecting and splitting background images.")
 
-    __delete_orphaned_images_and_inferences(images_path, inference_path, labels_path)
-    __delete_invalid_txt_files(images_path, inference_path, labels_path)
-    class_idxs = update_labels(class_mapping, labels_path)
-    __split_data(class_mapping, temp_dir_path, output_directory)
+            bg_images=int(final_image_count*0.06)
 
-    # __save_class_idx_to_file(class_idxs, output_directory)
-    final_image_count = count_images_across_splits(output_directory)
-    print(f"\nOut of {original_image_count} input images, {final_image_count} are eligible for detection. \nThese are saved across train, test and valid split in {output_directory}.")
-    __generate_sample_images_with_detections(output_directory, class_idxs)
+            search: dict[str, Any] = {
+                "scientificName": ["Plantae"]
+            }
 
-    if with_background:
-        print("\nCollecting and splitting background images.")
+            collect(
+                group_by_key=Group.scientificName,
+                search_parameters=search,
+                images_per_group=bg_images,
+                output_directory=temp_dir_path
+            )
 
-        bg_images=int(final_image_count*0.06)
+            __delete_corrupted_images(temp_dir_path / "Plantae")
 
-        search: dict[str, Any] = {
-            "scientificName": ["Plantae"]
-        }
+            __split_background_images(temp_dir_path / "Plantae", output_directory)
 
-        collect(
-            group_by_key=Group.scientificName,
-            search_parameters=search,
-            images_per_group=bg_images,
-            output_directory=temp_dir_path
-        )
+        __count_classes_and_output_table(output_directory, class_idxs)
+
+        __make_yaml_file(output_directory, class_idxs)
+    else:
+        try:
+            sized_dir = temp_dir_path / "sized"
+            sized_dir.mkdir(parents=True, exist_ok=True)
+            __two_stage_update(class_mapping, filtered, sized_dir, images_path)
+            __classification_split(sized_dir, output_directory)
+            __count_classification_split(output_directory, class_mapping)
+        except:
+            __classification_split(images_path, output_directory)
+            __count_classification_split(output_directory, class_mapping)
+
+def __count_classification_split(output_directory: str, class_mapping: dict):
+    """
+    Counts the number of images in the train and valid splits for each class.
+
+    Args:
+        output_directory (str): Path to the output directory containing train and valid splits.
+        class_mapping (dict): Dictionary mapping class names to image file names.
+    """
+    class_counts = {}
+    train_counts = {}
+    valid_counts = {}
+
+    for class_name in class_mapping.keys():
+        train_dir = output_directory / 'train' / class_name
+        valid_dir = output_directory / 'valid' / class_name
+
+        train_count = len(list(train_dir.glob("*.jpg"))) if train_dir.exists() else 0
+        valid_count = len(list(valid_dir.glob("*.jpg"))) if valid_dir.exists() else 0
+        total_count = train_count + valid_count
+
+        class_counts[class_name] = total_count
+        train_counts[class_name] = train_count
+        valid_counts[class_name] = valid_count
+
+    table = PrettyTable()
+    table.field_names = ["Class", "Train", "Valid", "Total"]
+    for class_name in class_mapping.keys():
+        table.add_row([
+            class_name,
+            train_counts[class_name],
+            valid_counts[class_name],
+            class_counts[class_name]
+        ])
+    print(table)
+    print(f"Saved in {output_directory}")
+
+def __classification_split(input_directory: str, output_directory: str):
+    """
+    Splits the data into train and validation sets for classification tasks.
+
+    Args:
+        input_directory (str): Path to the input directory containing subdirectories of class names.
+        output_directory (str): Path to the output directory where train and valid splits will be created.
+    """
+    input_directory = Path(input_directory)
+    output_directory = Path(output_directory)
+
+    # Create train and valid directories
+    train_dir = output_directory / 'train'
+    valid_dir = output_directory / 'valid'
+
+    train_dir.mkdir(parents=True, exist_ok=True)
+    valid_dir.mkdir(parents=True, exist_ok=True)
+
+    # Process each class directory
+    for class_dir in input_directory.iterdir():
+        if not class_dir.is_dir():
+            continue
+
+        class_name = class_dir.name
+        print(f"Processing class: {class_name}")
+
+        # Create corresponding class directories in train and valid
+        (train_dir / class_name).mkdir(exist_ok=True)
+        (valid_dir / class_name).mkdir(exist_ok=True)
+
+        # Get all image files
+        image_files = list(class_dir.glob('*.jpg')) + list(class_dir.glob('*.jpeg')) + list(class_dir.glob('*.png'))
+
+        if not image_files:
+            print(f"Warning: No images found in {class_dir}")
+            continue
+
+        # Shuffle the files to ensure random distribution
+        np.random.shuffle(image_files)
+
+        # Split into train (90%) and valid (10%)
+        split_idx = int(len(image_files) * 0.9)
+        train_files = image_files[:split_idx]
+        valid_files = image_files[split_idx:]
+
+        # Copy files to respective directories
+        for img_file in train_files:
+            shutil.copy(img_file, train_dir / class_name / img_file.name)
+
+        for img_file in valid_files:
+            shutil.copy(img_file, valid_dir / class_name / img_file.name)
+
+        print(f" - {len(train_files)} images in train, {len(valid_files)} images in valid")
+
+    print(f"\nData split complete. Train and validation sets created in {output_directory}")
 
-    __delete_corrupted_images(temp_dir_path / "Plantae")
+def filter_by_size(images_path: Path, labels_path: Path, sizes: list):
+    """
+    Filters images by size and updates labels accordingly.
 
-    __split_background_images(temp_dir_path / "Plantae", output_directory)
+    Args:
+        images_path (Path): The path to the directory containing images.
+        labels_path (Path): The path to the directory containing labels.
+        sizes (list): A list of sizes to filter by.
+    """
+    size_map={
+        "small": [0, 0.15],
+        "medium": [0.15, 0.3],
+        "large": [0.3, 1],
+    }
+
+    filtered_images = []
+    for image_file in images_path.glob("*.jpg"):
+        label_file = labels_path / (image_file.stem + ".txt")
+        image_name = image_file.name
+
+        if label_file.exists():
+            with open(label_file, 'r') as file:
+                lines = file.readlines()
+                if len(lines) != 1:
+                    continue
+                else:
+                    parts = lines[0].split()
+                    _, _, width, height = map(float, parts[1:])
+                    for size in sizes:
+                        if width < size_map[size][1] and width >= size_map[size][0] and height < size_map[size][1] and height >= size_map[size][0]:
+                            filtered_images.append(image_name)
+
+    for image_file in images_path.glob("*.jpg"):
+        label_file = labels_path / (image_file.stem + ".txt")
+        image_name = image_file.name
+        if image_name not in filtered_images:
+            image_file.unlink()
+            try:
+                label_file.unlink()
+            except FileNotFoundError:
+                pass
+    return filtered_images
 
-    __count_classes_and_output_table(output_directory, class_idxs)
+def __two_stage_update(class_mapping: dict, filtered_images: Path, output_directory: Path, images_path: Path):
+    """
+    Prepares folders with class name containing filtered images.
+    """
 
-    __make_yaml_file(output_directory, class_idxs)
+    for class_name, images in class_mapping.items():
+        for image_name in images:
+            if image_name in filtered_images:
+                (output_directory / class_name).mkdir(parents=True, exist_ok=True)
+                shutil.copy(images_path / image_name, output_directory / class_name / image_name)
 
 def __delete_corrupted_images(images_path: Path):
 
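A note on the serialization.add_safe_globals(...) block added above: PyTorch 2.6 changed the default of torch.load to weights_only=True, which refuses to unpickle any class that has not been explicitly allowlisted, and a full Ultralytics checkpoint stores the model object itself rather than a bare state dict. This is also why the labels directory moves to predict/labels: Ultralytics' model.predict(..., save_txt=True, project=...) writes its outputs under <project>/predict/. A minimal sketch of the allowlisting mechanism (the checkpoint path is illustrative; a real checkpoint needs the full class list registered in the diff):

    import torch
    from torch import serialization
    from ultralytics.nn.tasks import DetectionModel
    from torch.nn.modules.container import Sequential

    # PyTorch >= 2.6 defaults torch.load to weights_only=True, which only
    # reconstructs allowlisted classes; anything else raises UnpicklingError.
    serialization.add_safe_globals([DetectionModel, Sequential])

    # With the complete set registered (as prepare.py does above), this works:
    # checkpoint = torch.load("small-generic.pt")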
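And a worked example of the filter_by_size buckets: each YOLO label file holds one "class x_center y_center width height" line per box, all values normalized to [0, 1], and an image survives only if its single box fits a requested bucket in both dimensions. A sketch with a hypothetical label line:

    size_map = {"small": [0, 0.15], "medium": [0.15, 0.3], "large": [0.3, 1]}

    line = "0 0.512 0.431 0.120 0.090"   # hypothetical single-object label
    _, _, width, height = map(float, line.split()[1:])

    for name, (low, high) in size_map.items():
        if low <= width < high and low <= height < high:
            print(f"kept: {name}")       # -> kept: small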
@@ -158,7 +345,7 @@ def __download_file_from_github_release(url, dest_path):
         progress_bar.close()
         raise Exception(f"Failed to download file from {url}")
 
-def __delete_orphaned_images_and_inferences(images_path: Path, inference_path: Path, labels_path: Path):
+def __delete_orphaned_images_and_inferences(images_path: Path, labels_path: Path):
 
     """
     Deletes orphaned images and their corresponding inference files if they do not have a label file.
@@ -177,15 +364,10 @@ def __delete_orphaned_images_and_inferences(images_path: Path, inference_path: Path, labels_path: Path):
     for txt_file in labels_path.glob("*.txt"):
         image_file_jpg = images_path / (txt_file.stem + ".jpg")
         image_file_jpeg = images_path / (txt_file.stem + ".jpeg")
-        inference_file_jpg = inference_path / (txt_file.stem + ".jpg")
-        inference_file_jpeg = inference_path / (txt_file.stem + ".jpeg")
 
         if not (image_file_jpg.exists() or image_file_jpeg.exists()):
             print(f"Deleting {txt_file.name} - No corresponding image file")
             txt_file.unlink()
-        elif not (inference_file_jpg.exists() or inference_file_jpeg.exists()):
-            print(f"Deleting {txt_file.name} - No corresponding inference file")
-            txt_file.unlink()
 
     label_stems = {txt_file.stem for txt_file in labels_path.glob("*.txt")}
     image_files = list(images_path.glob("*.jpg")) + list(images_path.glob("*.jpeg"))
@@ -195,19 +377,9 @@ def __delete_orphaned_images_and_inferences(images_path: Path, inference_path: Path, labels_path: Path):
             print(f"Deleting orphaned image: {image_file.name}")
             image_file.unlink()
 
-            inference_file_jpg = inference_path / (image_file.stem + ".jpg")
-            inference_file_jpeg = inference_path / (image_file.stem + ".jpeg")
-
-            if inference_file_jpg.exists():
-                inference_file_jpg.unlink()
-                print(f"Deleted corresponding inference file: {inference_file_jpg.name}")
-            elif inference_file_jpeg.exists():
-                inference_file_jpeg.unlink()
-                print(f"Deleted corresponding inference file: {inference_file_jpeg.name}")
+    print("Orphaned images files without corresponding labels have been deleted.")
 
-    print("Orphaned images and inference files without corresponding labels have been deleted.")
-
-def __delete_invalid_txt_files(images_path: Path, inference_path: Path, labels_path: Path):
+def __delete_invalid_txt_files(images_path: Path, labels_path: Path):
 
     """
     Deletes invalid text files and their corresponding image and inference files.
@@ -232,8 +404,6 @@ def __delete_invalid_txt_files(images_path: Path, inference_path: Path, labels_path: Path):
 
         image_file_jpg = images_path / (txt_file.stem + ".jpg")
         image_file_jpeg = images_path / (txt_file.stem + ".jpeg")
-        inference_file_jpg = inference_path / (txt_file.stem + ".jpg")
-        inference_file_jpeg = inference_path / (txt_file.stem + ".jpeg")
 
         if image_file_jpg.exists():
             image_file_jpg.unlink()
@@ -242,14 +412,7 @@ def __delete_invalid_txt_files(images_path: Path, inference_path: Path, labels_path: Path):
             image_file_jpeg.unlink()
             print(f"Deleted corresponding image file: {image_file_jpeg.name}")
 
-        if inference_file_jpg.exists():
-            inference_file_jpg.unlink()
-            print(f"Deleted corresponding inference file: {inference_file_jpg.name}")
-        elif inference_file_jpeg.exists():
-            inference_file_jpeg.unlink()
-            print(f"Deleted corresponding inference file: {inference_file_jpeg.name}")
-
-    print("Invalid text files and their corresponding images and inference files have been deleted.")
+    print("Invalid text files and their corresponding images files have been deleted.")
 
 
 def __split_data(class_mapping: dict, temp_dir_path: Path, output_directory: Path):
@@ -262,7 +425,7 @@ def __split_data(class_mapping: dict, temp_dir_path: Path, output_directory: Path):
         output_directory (Path): The path to the output directory where the split data will be saved.
     """
     images_dir = temp_dir_path / "images"
-    labels_dir = temp_dir_path / "labels"
+    labels_dir = temp_dir_path / "predict" / "labels"
 
     def create_dirs(split):
         (output_directory / split).mkdir(parents=True, exist_ok=True)
@@ -483,6 +646,7 @@ def __count_classes_and_output_table(output_directory: Path, class_idxs: dict):
         table.add_row([class_name, class_index, train_count, test_count, valid_count, total])
 
     print(table)
+
 def update_labels(class_mapping: dict, labels_path: Path) -> dict:
     """
     Updates the labels based on the class mapping.
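For orientation, a hedged sketch of how the reworked entry point is called for each pipeline; the directory names are illustrative, and prepare is assumed to remain exported at package level as in 1.1.0:

    import bplusplus

    # One-stage: YOLO detection dataset split into train/test/valid,
    # optionally padded with ~6% GBIF "Plantae" background images.
    bplusplus.prepare(
        input_directory="collected_images",
        output_directory="detection_dataset",
        one_stage=True,
        with_background=True,
    )

    # Two-stage (the default): per-class folders for a classifier, keeping
    # only images whose single detected box fits the requested size buckets.
    bplusplus.prepare(
        input_directory="collected_images",
        output_directory="classification_dataset",
        size_filter=True,
        sizes=["small", "medium"],   # at most two of: small, medium, large
    )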