ultralytics 8.1.26__py3-none-any.whl → 8.1.27__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of ultralytics might be problematic. Click here for more details.
- ultralytics/__init__.py +1 -1
- ultralytics/data/base.py +3 -1
- ultralytics/data/loaders.py +12 -11
- ultralytics/engine/predictor.py +17 -13
- ultralytics/engine/validator.py +1 -1
- ultralytics/nn/autobackend.py +6 -2
- ultralytics/trackers/track.py +9 -6
- {ultralytics-8.1.26.dist-info → ultralytics-8.1.27.dist-info}/METADATA +1 -1
- {ultralytics-8.1.26.dist-info → ultralytics-8.1.27.dist-info}/RECORD +13 -13
- {ultralytics-8.1.26.dist-info → ultralytics-8.1.27.dist-info}/LICENSE +0 -0
- {ultralytics-8.1.26.dist-info → ultralytics-8.1.27.dist-info}/WHEEL +0 -0
- {ultralytics-8.1.26.dist-info → ultralytics-8.1.27.dist-info}/entry_points.txt +0 -0
- {ultralytics-8.1.26.dist-info → ultralytics-8.1.27.dist-info}/top_level.txt +0 -0
ultralytics/__init__.py
CHANGED
ultralytics/data/base.py
CHANGED
|
@@ -120,7 +120,9 @@ class BaseDataset(Dataset):
|
|
|
120
120
|
except Exception as e:
|
|
121
121
|
raise FileNotFoundError(f"{self.prefix}Error loading data from {img_path}\n{HELP_URL}") from e
|
|
122
122
|
if self.fraction < 1:
|
|
123
|
-
im_files = im_files[: round(len(im_files) * self.fraction)]
|
|
123
|
+
# im_files = im_files[: round(len(im_files) * self.fraction)]
|
|
124
|
+
num_elements_to_select = round(len(im_files) * self.fraction)
|
|
125
|
+
im_files = random.sample(im_files, num_elements_to_select)
|
|
124
126
|
return im_files
|
|
125
127
|
|
|
126
128
|
def update_labels(self, include_class: Optional[list]):
|
ultralytics/data/loaders.py
CHANGED
|
@@ -80,8 +80,6 @@ class LoadStreams:
|
|
|
80
80
|
self.imgs = [[] for _ in range(n)] # images
|
|
81
81
|
self.shape = [[] for _ in range(n)] # image shapes
|
|
82
82
|
self.sources = [ops.clean_str(x) for x in sources] # clean source names for later
|
|
83
|
-
self.info = [""] * n
|
|
84
|
-
self.is_video = [True] * n
|
|
85
83
|
for i, s in enumerate(sources): # index, source
|
|
86
84
|
# Start thread to read frames from video stream
|
|
87
85
|
st = f"{i + 1}/{n}: {s}... "
|
|
@@ -178,7 +176,7 @@ class LoadStreams:
|
|
|
178
176
|
images.append(x.pop(-1) if x else np.zeros(self.shape[i], dtype=np.uint8))
|
|
179
177
|
x.clear()
|
|
180
178
|
|
|
181
|
-
return self.sources, images,
|
|
179
|
+
return self.sources, images, [""] * self.bs
|
|
182
180
|
|
|
183
181
|
def __len__(self):
|
|
184
182
|
"""Return the length of the sources object."""
|
|
@@ -227,6 +225,7 @@ class LoadScreenshots:
|
|
|
227
225
|
self.frame = 0
|
|
228
226
|
self.sct = mss.mss()
|
|
229
227
|
self.bs = 1
|
|
228
|
+
self.fps = 30
|
|
230
229
|
|
|
231
230
|
# Parse monitor shape
|
|
232
231
|
monitor = self.sct.monitors[self.screen]
|
|
@@ -246,7 +245,7 @@ class LoadScreenshots:
|
|
|
246
245
|
s = f"screen {self.screen} (LTWH): {self.left},{self.top},{self.width},{self.height}: "
|
|
247
246
|
|
|
248
247
|
self.frame += 1
|
|
249
|
-
return [str(self.screen)], [im0], [
|
|
248
|
+
return [str(self.screen)], [im0], [s] # screen, img, string
|
|
250
249
|
|
|
251
250
|
|
|
252
251
|
class LoadImagesAndVideos:
|
|
@@ -298,6 +297,7 @@ class LoadImagesAndVideos:
|
|
|
298
297
|
|
|
299
298
|
self.files = images + videos
|
|
300
299
|
self.nf = ni + nv # number of files
|
|
300
|
+
self.ni = ni # number of images
|
|
301
301
|
self.video_flag = [False] * ni + [True] * nv
|
|
302
302
|
self.mode = "image"
|
|
303
303
|
self.vid_stride = vid_stride # video frame-rate stride
|
|
@@ -319,11 +319,11 @@ class LoadImagesAndVideos:
|
|
|
319
319
|
|
|
320
320
|
def __next__(self):
|
|
321
321
|
"""Returns the next batch of images or video frames along with their paths and metadata."""
|
|
322
|
-
paths, imgs,
|
|
322
|
+
paths, imgs, info = [], [], []
|
|
323
323
|
while len(imgs) < self.bs:
|
|
324
324
|
if self.count >= self.nf: # end of file list
|
|
325
325
|
if len(imgs) > 0:
|
|
326
|
-
return paths, imgs,
|
|
326
|
+
return paths, imgs, info # return last partial batch
|
|
327
327
|
else:
|
|
328
328
|
raise StopIteration
|
|
329
329
|
|
|
@@ -344,7 +344,6 @@ class LoadImagesAndVideos:
|
|
|
344
344
|
self.frame += 1
|
|
345
345
|
paths.append(path)
|
|
346
346
|
imgs.append(im0)
|
|
347
|
-
is_video.append(True)
|
|
348
347
|
info.append(f"video {self.count + 1}/{self.nf} (frame {self.frame}/{self.frames}) {path}: ")
|
|
349
348
|
if self.frame == self.frames: # end of video
|
|
350
349
|
self.count += 1
|
|
@@ -363,16 +362,18 @@ class LoadImagesAndVideos:
|
|
|
363
362
|
raise FileNotFoundError(f"Image Not Found {path}")
|
|
364
363
|
paths.append(path)
|
|
365
364
|
imgs.append(im0)
|
|
366
|
-
is_video.append(False) # no capture object for images
|
|
367
365
|
info.append(f"image {self.count + 1}/{self.nf} {path}: ")
|
|
368
366
|
self.count += 1 # move to the next file
|
|
367
|
+
if self.count >= self.ni: # end of image list
|
|
368
|
+
break
|
|
369
369
|
|
|
370
|
-
return paths, imgs,
|
|
370
|
+
return paths, imgs, info
|
|
371
371
|
|
|
372
372
|
def _new_video(self, path):
|
|
373
373
|
"""Creates a new video capture object for the given path."""
|
|
374
374
|
self.frame = 0
|
|
375
375
|
self.cap = cv2.VideoCapture(path)
|
|
376
|
+
self.fps = int(self.cap.get(cv2.CAP_PROP_FPS))
|
|
376
377
|
if not self.cap.isOpened():
|
|
377
378
|
raise FileNotFoundError(f"Failed to open video {path}")
|
|
378
379
|
self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT) / self.vid_stride)
|
|
@@ -429,7 +430,7 @@ class LoadPilAndNumpy:
|
|
|
429
430
|
if self.count == 1: # loop only once as it's batch inference
|
|
430
431
|
raise StopIteration
|
|
431
432
|
self.count += 1
|
|
432
|
-
return self.paths, self.im0, [
|
|
433
|
+
return self.paths, self.im0, [""] * self.bs
|
|
433
434
|
|
|
434
435
|
def __iter__(self):
|
|
435
436
|
"""Enables iteration for class LoadPilAndNumpy."""
|
|
@@ -494,7 +495,7 @@ class LoadTensor:
|
|
|
494
495
|
if self.count == 1:
|
|
495
496
|
raise StopIteration
|
|
496
497
|
self.count += 1
|
|
497
|
-
return self.paths, self.im0, [
|
|
498
|
+
return self.paths, self.im0, [""] * self.bs
|
|
498
499
|
|
|
499
500
|
def __len__(self):
|
|
500
501
|
"""Returns the batch size."""
|
ultralytics/engine/predictor.py
CHANGED
|
@@ -30,6 +30,7 @@ Usage - formats:
|
|
|
30
30
|
"""
|
|
31
31
|
|
|
32
32
|
import platform
|
|
33
|
+
import re
|
|
33
34
|
import threading
|
|
34
35
|
from pathlib import Path
|
|
35
36
|
|
|
@@ -236,7 +237,7 @@ class BasePredictor:
|
|
|
236
237
|
self.run_callbacks("on_predict_start")
|
|
237
238
|
for self.batch in self.dataset:
|
|
238
239
|
self.run_callbacks("on_predict_batch_start")
|
|
239
|
-
paths, im0s,
|
|
240
|
+
paths, im0s, s = self.batch
|
|
240
241
|
|
|
241
242
|
# Preprocess
|
|
242
243
|
with profilers[0]:
|
|
@@ -264,7 +265,7 @@ class BasePredictor:
|
|
|
264
265
|
"postprocess": profilers[2].dt * 1e3 / n,
|
|
265
266
|
}
|
|
266
267
|
if self.args.verbose or self.args.save or self.args.save_txt or self.args.show:
|
|
267
|
-
s[i] += self.write_results(i, Path(paths[i]), im,
|
|
268
|
+
s[i] += self.write_results(i, Path(paths[i]), im, s)
|
|
268
269
|
|
|
269
270
|
# Print batch results
|
|
270
271
|
if self.args.verbose:
|
|
@@ -294,11 +295,12 @@ class BasePredictor:
|
|
|
294
295
|
def setup_model(self, model, verbose=True):
|
|
295
296
|
"""Initialize YOLO model with given parameters and set it to evaluation mode."""
|
|
296
297
|
self.model = AutoBackend(
|
|
297
|
-
model or self.args.model,
|
|
298
|
+
weights=model or self.args.model,
|
|
298
299
|
device=select_device(self.args.device, verbose=verbose),
|
|
299
300
|
dnn=self.args.dnn,
|
|
300
301
|
data=self.args.data,
|
|
301
302
|
fp16=self.args.half,
|
|
303
|
+
batch=self.args.batch,
|
|
302
304
|
fuse=True,
|
|
303
305
|
verbose=verbose,
|
|
304
306
|
)
|
|
@@ -307,7 +309,7 @@ class BasePredictor:
|
|
|
307
309
|
self.args.half = self.model.fp16 # update half
|
|
308
310
|
self.model.eval()
|
|
309
311
|
|
|
310
|
-
def write_results(self, i, p, im,
|
|
312
|
+
def write_results(self, i, p, im, s):
|
|
311
313
|
"""Write inference results to a file or directory."""
|
|
312
314
|
string = "" # print string
|
|
313
315
|
if len(im.shape) == 3:
|
|
@@ -316,9 +318,10 @@ class BasePredictor:
|
|
|
316
318
|
string += f"{i}: "
|
|
317
319
|
frame = self.dataset.count
|
|
318
320
|
else:
|
|
319
|
-
|
|
321
|
+
match = re.search(r"frame (\d+)/", s[i])
|
|
322
|
+
frame = int(match.group(1)) if match else None # 0 if frame undetermined
|
|
320
323
|
|
|
321
|
-
self.txt_path = self.save_dir / "labels" / (p.stem +
|
|
324
|
+
self.txt_path = self.save_dir / "labels" / (p.stem + ("" if self.dataset.mode == "image" else f"_{frame}"))
|
|
322
325
|
string += "%gx%g " % im.shape[2:]
|
|
323
326
|
result = self.results[i]
|
|
324
327
|
result.save_dir = self.save_dir.__str__() # used in other locations
|
|
@@ -340,18 +343,19 @@ class BasePredictor:
|
|
|
340
343
|
if self.args.save_crop:
|
|
341
344
|
result.save_crop(save_dir=self.save_dir / "crops", file_name=self.txt_path.stem)
|
|
342
345
|
if self.args.show:
|
|
343
|
-
self.show(str(p)
|
|
346
|
+
self.show(str(p))
|
|
344
347
|
if self.args.save:
|
|
345
|
-
self.save_predicted_images(str(self.save_dir / p.name),
|
|
348
|
+
self.save_predicted_images(str(self.save_dir / p.name), frame)
|
|
346
349
|
|
|
347
350
|
return string
|
|
348
351
|
|
|
349
|
-
def save_predicted_images(self, save_path="",
|
|
352
|
+
def save_predicted_images(self, save_path="", frame=0):
|
|
350
353
|
"""Save video predictions as mp4 at specified path."""
|
|
351
354
|
im = self.plotted_img
|
|
352
355
|
|
|
353
356
|
# Save videos and streams
|
|
354
|
-
if
|
|
357
|
+
if self.dataset.mode in {"stream", "video"}:
|
|
358
|
+
fps = self.dataset.fps if self.dataset.mode == "video" else 30
|
|
355
359
|
frames_path = f'{save_path.split(".", 1)[0]}_frames/'
|
|
356
360
|
if save_path not in self.vid_writer: # new video
|
|
357
361
|
if self.args.save_frames:
|
|
@@ -360,7 +364,7 @@ class BasePredictor:
|
|
|
360
364
|
self.vid_writer[save_path] = cv2.VideoWriter(
|
|
361
365
|
filename=str(Path(save_path).with_suffix(suffix)),
|
|
362
366
|
fourcc=cv2.VideoWriter_fourcc(*fourcc),
|
|
363
|
-
fps=
|
|
367
|
+
fps=fps, # integer required, floats produce error in MP4 codec
|
|
364
368
|
frameSize=(im.shape[1], im.shape[0]), # (width, height)
|
|
365
369
|
)
|
|
366
370
|
|
|
@@ -373,7 +377,7 @@ class BasePredictor:
|
|
|
373
377
|
else:
|
|
374
378
|
cv2.imwrite(save_path, im)
|
|
375
379
|
|
|
376
|
-
def show(self, p=""
|
|
380
|
+
def show(self, p=""):
|
|
377
381
|
"""Display an image in a window using OpenCV imshow()."""
|
|
378
382
|
im = self.plotted_img
|
|
379
383
|
if platform.system() == "Linux" and p not in self.windows:
|
|
@@ -381,7 +385,7 @@ class BasePredictor:
|
|
|
381
385
|
cv2.namedWindow(p, cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux)
|
|
382
386
|
cv2.resizeWindow(p, im.shape[1], im.shape[0]) # (width, height)
|
|
383
387
|
cv2.imshow(p, im)
|
|
384
|
-
cv2.waitKey(
|
|
388
|
+
cv2.waitKey(300 if self.dataset.mode == "image" else 1) # 1 millisecond
|
|
385
389
|
|
|
386
390
|
def run_callbacks(self, event: str):
|
|
387
391
|
"""Runs all registered callbacks for a specific event."""
|
ultralytics/engine/validator.py
CHANGED
|
@@ -122,7 +122,7 @@ class BaseValidator:
|
|
|
122
122
|
else:
|
|
123
123
|
callbacks.add_integration_callbacks(self)
|
|
124
124
|
model = AutoBackend(
|
|
125
|
-
model or self.args.model,
|
|
125
|
+
weights=model or self.args.model,
|
|
126
126
|
device=select_device(self.args.device, self.args.batch),
|
|
127
127
|
dnn=self.args.dnn,
|
|
128
128
|
data=self.args.data,
|
ultralytics/nn/autobackend.py
CHANGED
|
@@ -86,6 +86,7 @@ class AutoBackend(nn.Module):
|
|
|
86
86
|
dnn=False,
|
|
87
87
|
data=None,
|
|
88
88
|
fp16=False,
|
|
89
|
+
batch=1,
|
|
89
90
|
fuse=True,
|
|
90
91
|
verbose=True,
|
|
91
92
|
):
|
|
@@ -98,6 +99,7 @@ class AutoBackend(nn.Module):
|
|
|
98
99
|
dnn (bool): Use OpenCV DNN module for ONNX inference. Defaults to False.
|
|
99
100
|
data (str | Path | optional): Path to the additional data.yaml file containing class names. Optional.
|
|
100
101
|
fp16 (bool): Enable half-precision inference. Supported only on specific backends. Defaults to False.
|
|
102
|
+
batch (int): Batch-size to assume for inference.
|
|
101
103
|
fuse (bool): Fuse Conv2D + BatchNorm layers for optimization. Defaults to True.
|
|
102
104
|
verbose (bool): Enable verbose logging. Defaults to True.
|
|
103
105
|
"""
|
|
@@ -204,7 +206,9 @@ class AutoBackend(nn.Module):
|
|
|
204
206
|
if batch_dim.is_static:
|
|
205
207
|
batch_size = batch_dim.get_length()
|
|
206
208
|
|
|
207
|
-
|
|
209
|
+
# OpenVINO inference modes are 'LATENCY', 'THROUGHPUT' (not recommended), or 'CUMULATIVE_THROUGHPUT'
|
|
210
|
+
inference_mode = "CUMULATIVE_THROUGHPUT" if batch > 1 else "LATENCY"
|
|
211
|
+
LOGGER.info(f"Using OpenVINO {inference_mode} mode for batch-size={batch_size} inference...")
|
|
208
212
|
ov_compiled_model = core.compile_model(
|
|
209
213
|
ov_model,
|
|
210
214
|
device_name="AUTO", # AUTO selects best available device, do not modify
|
|
@@ -454,7 +458,7 @@ class AutoBackend(nn.Module):
|
|
|
454
458
|
# Start async inference with userdata=i to specify the position in results list
|
|
455
459
|
async_queue.start_async(inputs={self.input_name: im[i : i + 1]}, userdata=i) # keep image as BCHW
|
|
456
460
|
async_queue.wait_all() # wait for all inference requests to complete
|
|
457
|
-
y = [list(r.values()) for r in results]
|
|
461
|
+
y = np.concatenate([list(r.values())[0] for r in results])
|
|
458
462
|
|
|
459
463
|
else: # inference_mode = "LATENCY", optimized for fastest first result at batch-size 1
|
|
460
464
|
y = list(self.ov_compiled_model(im).values())
|
ultralytics/trackers/track.py
CHANGED
|
@@ -38,6 +38,8 @@ def on_predict_start(predictor: object, persist: bool = False) -> None:
|
|
|
38
38
|
for _ in range(predictor.dataset.bs):
|
|
39
39
|
tracker = TRACKER_MAP[cfg.tracker_type](args=cfg, frame_rate=30)
|
|
40
40
|
trackers.append(tracker)
|
|
41
|
+
if predictor.dataset.mode != "stream": # only need one tracker for other modes.
|
|
42
|
+
break
|
|
41
43
|
predictor.trackers = trackers
|
|
42
44
|
predictor.vid_path = [None] * predictor.dataset.bs # for determining when to reset tracker on new video
|
|
43
45
|
|
|
@@ -50,20 +52,21 @@ def on_predict_postprocess_end(predictor: object, persist: bool = False) -> None
|
|
|
50
52
|
predictor (object): The predictor object containing the predictions.
|
|
51
53
|
persist (bool, optional): Whether to persist the trackers if they already exist. Defaults to False.
|
|
52
54
|
"""
|
|
53
|
-
bs = predictor.dataset.bs
|
|
54
55
|
path, im0s = predictor.batch[:2]
|
|
55
56
|
|
|
56
57
|
is_obb = predictor.args.task == "obb"
|
|
57
|
-
|
|
58
|
+
is_stream = predictor.dataset.mode == "stream"
|
|
59
|
+
for i in range(len(im0s)):
|
|
60
|
+
tracker = predictor.trackers[i if is_stream else 0]
|
|
58
61
|
vid_path = predictor.save_dir / Path(path[i]).name
|
|
59
|
-
if not persist and predictor.vid_path[i] != vid_path:
|
|
60
|
-
|
|
61
|
-
predictor.vid_path[i] = vid_path
|
|
62
|
+
if not persist and predictor.vid_path[i if is_stream else 0] != vid_path:
|
|
63
|
+
tracker.reset()
|
|
64
|
+
predictor.vid_path[i if is_stream else 0] = vid_path
|
|
62
65
|
|
|
63
66
|
det = (predictor.results[i].obb if is_obb else predictor.results[i].boxes).cpu().numpy()
|
|
64
67
|
if len(det) == 0:
|
|
65
68
|
continue
|
|
66
|
-
tracks =
|
|
69
|
+
tracks = tracker.update(det, im0s[i])
|
|
67
70
|
if len(tracks) == 0:
|
|
68
71
|
continue
|
|
69
72
|
idx = tracks[:, -1].astype(int)
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.1
|
|
2
2
|
Name: ultralytics
|
|
3
|
-
Version: 8.1.26
|
|
3
|
+
Version: 8.1.27
|
|
4
4
|
Summary: Ultralytics YOLOv8 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
|
|
5
5
|
Author: Glenn Jocher, Ayush Chaurasia, Jing Qiu
|
|
6
6
|
Maintainer: Glenn Jocher, Ayush Chaurasia, Jing Qiu
|
|
@@ -1,4 +1,4 @@
|
|
|
1
|
-
ultralytics/__init__.py,sha256=
|
|
1
|
+
ultralytics/__init__.py,sha256=NtUw_FygdoZVGcRGxwxBIydyV7n2tuCZWJaRkwP8G4E,625
|
|
2
2
|
ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
|
|
3
3
|
ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
|
|
4
4
|
ultralytics/cfg/__init__.py,sha256=Dk0UPabXlPX5iCDzqf8MIxCNtY7HMhVRcd_B2tZw9_w,20767
|
|
@@ -60,11 +60,11 @@ ultralytics/cfg/trackers/bytetrack.yaml,sha256=QvHmtuwulK4X6j3T5VEqtCm0sbWWBUVmW
|
|
|
60
60
|
ultralytics/data/__init__.py,sha256=A3i0n-2MnNzSdYqhM8xynBO2HJNKGSXWhPvRyO0_u1I,409
|
|
61
61
|
ultralytics/data/annotator.py,sha256=evXQzARVerc0hb9ol-n_GrrHf-dlXO4lCMMWEZoJ2UM,2117
|
|
62
62
|
ultralytics/data/augment.py,sha256=ORotqUN-qulkHxzoW5hFF_CZDlBhuaqGgAsiPUVIf4I,52000
|
|
63
|
-
ultralytics/data/base.py,sha256=
|
|
63
|
+
ultralytics/data/base.py,sha256=_rbKt666SKVUqQtoMyzmYMCEE4rDNryPM_uweRqE4hA,13363
|
|
64
64
|
ultralytics/data/build.py,sha256=z3hirjrw5BeLhqz3fRWQvPAS-alaJKY5KdfVnmTuiug,6406
|
|
65
65
|
ultralytics/data/converter.py,sha256=DJ5aSk7w-RBKqrrABUoOahP_Lgccn7ujJSmVufOkBps,16503
|
|
66
66
|
ultralytics/data/dataset.py,sha256=aBia_ZUUqynstW2BRS1sGp2ggnhUkeUSZ_QC2nyJmvo,17616
|
|
67
|
-
ultralytics/data/loaders.py,sha256=
|
|
67
|
+
ultralytics/data/loaders.py,sha256=zrfxXQ5CMFXKTR_FUPk3oKHwXruKmoXfuJq4B0vcSyA,23045
|
|
68
68
|
ultralytics/data/split_dota.py,sha256=1q2FZC0SE4deRpXUSbKTbUAjX9VeejUIFM2DBLF8Cco,9961
|
|
69
69
|
ultralytics/data/utils.py,sha256=VBmVJFUyR9M8Wu7FQ0QCLS8bIFk0KHT2IH8oqePZOhs,29778
|
|
70
70
|
ultralytics/data/explorer/__init__.py,sha256=-Y3m1ZedepOQUv_KW82zaGxvU_PSHcuwUTFqG9BhAr4,113
|
|
@@ -75,11 +75,11 @@ ultralytics/data/explorer/gui/dash.py,sha256=a2s8oJKI8kqnWEcIyqCCzvIyvM_uZmfMaxr
|
|
|
75
75
|
ultralytics/engine/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7JI8grmQDTs,42
|
|
76
76
|
ultralytics/engine/exporter.py,sha256=CTiyS-EXvkxZoM-G_lmehfXL_MPVhhpUzB4HMFvuPDY,53718
|
|
77
77
|
ultralytics/engine/model.py,sha256=Kh5Bs3Rq6xJnpwCBtKFonNBLXYScg81uhj6zXx16bBw,39193
|
|
78
|
-
ultralytics/engine/predictor.py,sha256=
|
|
78
|
+
ultralytics/engine/predictor.py,sha256=fQNzm0KKAY8Af9KpyvPpLkkGqmQE3kBFIiE-_PnQjmY,17030
|
|
79
79
|
ultralytics/engine/results.py,sha256=SY3sn2OBMfAFaPoaDKo0Wu-jSi7avISYohjtR_bur9M,30120
|
|
80
80
|
ultralytics/engine/trainer.py,sha256=C04cEN9v-kvR2dIIjgAN8dBAx8XSTChlQkDxAxfwTlU,34527
|
|
81
81
|
ultralytics/engine/tuner.py,sha256=DzgTH3uk-VUUVoJ0K3tM4N5TJ6A3fMNlcDYr5g2I9lA,11763
|
|
82
|
-
ultralytics/engine/validator.py,sha256=
|
|
82
|
+
ultralytics/engine/validator.py,sha256=rcmJSGrsAfj-ryQktv6-fe0hAT7Z8CLNhUUUf0VsPYI,14645
|
|
83
83
|
ultralytics/hub/__init__.py,sha256=hNKAjBFZAi8_ZYasurDpDMlEOmFw0GrXCV7kLb2A-zE,5068
|
|
84
84
|
ultralytics/hub/auth.py,sha256=hc97pJ01OfI8oQ7uw3ubKbiVCDSGxSGJHoo9W6hrrNw,5403
|
|
85
85
|
ultralytics/hub/session.py,sha256=DXPQcPHFS84DlSbXnsfwUfCgjv5W4F3ioA7ADMWzm7w,14703
|
|
@@ -137,7 +137,7 @@ ultralytics/models/yolo/segment/predict.py,sha256=xtA0ZZyuh9WVpX7zZFdAeCkWnxhQ30
|
|
|
137
137
|
ultralytics/models/yolo/segment/train.py,sha256=aOQpDIptZfKSl9mFa6B-3W3QccMRlmBINBkI9K8-3sQ,2298
|
|
138
138
|
ultralytics/models/yolo/segment/val.py,sha256=njiF6RWddS-HOWxVvlk5PXRw6UOgEt_HEOZVPF7rruQ,11745
|
|
139
139
|
ultralytics/nn/__init__.py,sha256=4BPLHY89xEM_al5uK0aOmFgiML6CMGEZbezxOvTjOEs,587
|
|
140
|
-
ultralytics/nn/autobackend.py,sha256=
|
|
140
|
+
ultralytics/nn/autobackend.py,sha256=guDUpClKObK2Bwl4NH0C3EDVl3UEOvqRn1mr3c3j9Gs,29021
|
|
141
141
|
ultralytics/nn/tasks.py,sha256=JuXiYgnZBDC51MNTsaeSjz8H1ohio1Mx58l0EjdTm8c,42674
|
|
142
142
|
ultralytics/nn/modules/__init__.py,sha256=Ga3MDpwX6DeI7VSH8joti5uleP4mgkQGolbe8RLZ2T8,2326
|
|
143
143
|
ultralytics/nn/modules/block.py,sha256=yCHgCQTs2pIzCr7zqMJs8UF-3DM0-8X99k9vkEjv1ZA,25589
|
|
@@ -155,7 +155,7 @@ ultralytics/trackers/__init__.py,sha256=j72IgH2dZHQArMPK4YwcV5ieIw94fYvlGdQjB9cO
|
|
|
155
155
|
ultralytics/trackers/basetrack.py,sha256=-vBDD-Q9lsxfTMK2w9kuqWGrYbRMmaBCCEbGGyR53gE,3675
|
|
156
156
|
ultralytics/trackers/bot_sort.py,sha256=39AvhYVbT7izF3--rX_e6Lhgb5czTA23gw6AgnNcRds,8601
|
|
157
157
|
ultralytics/trackers/byte_tracker.py,sha256=z6z6jrhj8WeAP2azWZkhMUZET6g_8XMkMfdNpJg7jus,18871
|
|
158
|
-
ultralytics/trackers/track.py,sha256=
|
|
158
|
+
ultralytics/trackers/track.py,sha256=ayktOpi7SmaONsWqYXebrLQlVgDGuC9GNhmCBsDnLtI,3462
|
|
159
159
|
ultralytics/trackers/utils/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7JI8grmQDTs,42
|
|
160
160
|
ultralytics/trackers/utils/gmc.py,sha256=mXRqtlue1nmQU92TOKNH40R6lYFdUrKCYIbiPH6FIu0,13658
|
|
161
161
|
ultralytics/trackers/utils/kalman_filter.py,sha256=JN1sAcfJZy8fTZxc8w3jUJnGQDKtgAL__p4nTR6RM2I,15168
|
|
@@ -189,9 +189,9 @@ ultralytics/utils/callbacks/neptune.py,sha256=5Z3ua5YBTUS56FH8VQKQG1aaIo9fH8GEyz
|
|
|
189
189
|
ultralytics/utils/callbacks/raytune.py,sha256=6OgGNuC35F29lw8Dl_d0lue4-iBR6dqrBVQnIRQDx4E,632
|
|
190
190
|
ultralytics/utils/callbacks/tensorboard.py,sha256=hRmWjbqdA4RNaLuSZznuDcpOBW-_-_Ga0u-B8UU-7ZI,4134
|
|
191
191
|
ultralytics/utils/callbacks/wb.py,sha256=4QI81nHdzgwhXHlmTiRxLqunvkKakLXYUhHTUY1ZeHA,6635
|
|
192
|
-
ultralytics-8.1.26.dist-info/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
|
|
193
|
-
ultralytics-8.1.
|
|
194
|
-
ultralytics-8.1.26.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
|
|
195
|
-
ultralytics-8.1.26.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
|
|
196
|
-
ultralytics-8.1.26.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
|
|
197
|
-
ultralytics-8.1.26.dist-info/RECORD,,
|
|
192
|
+
ultralytics-8.1.27.dist-info/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
|
|
193
|
+
ultralytics-8.1.27.dist-info/METADATA,sha256=OsT5jhs2f8JgOfM0xhr0MIFYZ8xLYQpm6zghyivE9ec,40330
|
|
194
|
+
ultralytics-8.1.27.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
|
|
195
|
+
ultralytics-8.1.27.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
|
|
196
|
+
ultralytics-8.1.27.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
|
|
197
|
+
ultralytics-8.1.27.dist-info/RECORD,,
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|