tf-models-nightly 2.15.0.dev20240208__py2.py3-none-any.whl → 2.15.0.dev20240210__py2.py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- official/vision/configs/common.py +33 -1
- official/vision/dataloaders/retinanet_input.py +7 -0
- official/vision/ops/augment.py +140 -2
- official/vision/ops/augment_test.py +173 -0
- {tf_models_nightly-2.15.0.dev20240208.dist-info → tf_models_nightly-2.15.0.dev20240210.dist-info}/METADATA +1 -1
- {tf_models_nightly-2.15.0.dev20240208.dist-info → tf_models_nightly-2.15.0.dev20240210.dist-info}/RECORD +10 -10
- {tf_models_nightly-2.15.0.dev20240208.dist-info → tf_models_nightly-2.15.0.dev20240210.dist-info}/AUTHORS +0 -0
- {tf_models_nightly-2.15.0.dev20240208.dist-info → tf_models_nightly-2.15.0.dev20240210.dist-info}/LICENSE +0 -0
- {tf_models_nightly-2.15.0.dev20240208.dist-info → tf_models_nightly-2.15.0.dev20240210.dist-info}/WHEEL +0 -0
- {tf_models_nightly-2.15.0.dev20240208.dist-info → tf_models_nightly-2.15.0.dev20240210.dist-info}/top_level.txt +0 -0
official/vision/configs/common.py CHANGED
@@ -15,7 +15,7 @@
 """Common configurations."""
 
 import dataclasses
-from typing import List, Optional
+from typing import List, Optional, Sequence
 
 # Import libraries
 
@@ -100,6 +100,35 @@ class MixupAndCutmix(hyperparams.Config):
   label_smoothing: float = 0.1
 
 
+@dataclasses.dataclass
+class SSDRandomCropParam(hyperparams.Config):
+  min_object_covered: float = 0.0
+  min_box_overlap: float = 0.5
+  prob_to_apply: float = 0.85
+
+
+@dataclasses.dataclass
+class SSDRandomCrop(hyperparams.Config):
+  """Configuration for SSDRandomCrop.
+
+  Liu et al., SSD: Single shot multibox detector
+  https://arxiv.org/abs/1512.02325.
+  """
+  ssd_random_crop_params: Sequence[SSDRandomCropParam] = dataclasses.field(
+      default_factory=lambda: (
+          SSDRandomCropParam(min_object_covered=0.0),
+          SSDRandomCropParam(min_object_covered=0.1),
+          SSDRandomCropParam(min_object_covered=0.3),
+          SSDRandomCropParam(min_object_covered=0.5),
+          SSDRandomCropParam(min_object_covered=0.7),
+          SSDRandomCropParam(min_object_covered=0.9),
+          SSDRandomCropParam(min_object_covered=1.0),
+      )
+  )
+  aspect_ratio_range: tuple[float, float] = (0.5, 2.0)
+  area_range: tuple[float, float] = (0.1, 1.0)
+
+
 @dataclasses.dataclass
 class Augmentation(hyperparams.OneOfConfig):
   """Configuration for input data augmentation.
@@ -112,6 +141,9 @@ class Augmentation(hyperparams.OneOfConfig):
   type: Optional[str] = None
   randaug: RandAugment = dataclasses.field(default_factory=RandAugment)
   autoaug: AutoAugment = dataclasses.field(default_factory=AutoAugment)
+  ssd_random_crop: SSDRandomCrop = dataclasses.field(
+      default_factory=SSDRandomCrop
+  )
 
 
 @dataclasses.dataclass
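
For orientation, here is a minimal sketch of how the new entry is selected through the one-of mechanism. It assumes the standard behavior of this repo's hyperparams.OneOfConfig, where `type` names the active field and `get()` returns the chosen sub-config; treat it as illustration, not part of the diff:

    # Sketch only; `get()` is assumed from hyperparams.OneOfConfig.
    from official.vision.configs import common

    aug = common.Augmentation(type='ssd_random_crop')
    crop_cfg = aug.get()  # the sub-config named by `type`
    print(crop_cfg.aspect_ratio_range)           # (0.5, 2.0) by default
    print(len(crop_cfg.ssd_random_crop_params))  # 7 default parameter sets
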
official/vision/dataloaders/retinanet_input.py CHANGED
@@ -155,6 +155,13 @@ class Parser(parser.Parser):
            translate_const=aug_type.randaug.translate_const,
            prob_to_apply=aug_type.randaug.prob_to_apply,
            exclude_ops=aug_type.randaug.exclude_ops)
+      elif aug_type.type == 'ssd_random_crop':
+        logging.info('Using SSD Random Crop.')
+        self._augmenter = augment.SSDRandomCrop(
+            params=aug_type.ssd_random_crop.ssd_random_crop_params,
+            aspect_ratio_range=aug_type.ssd_random_crop.aspect_ratio_range,
+            area_range=aug_type.ssd_random_crop.area_range,
+        )
       else:
         raise ValueError(f'Augmentation policy {aug_type.type} not supported.')
 

official/vision/ops/augment.py CHANGED
@@ -26,14 +26,22 @@ MixupAndCutmix:
 RandomErasing, Mixup and Cutmix are inspired by
 https://github.com/rwightman/pytorch-image-models
 
+SSDRandCrop Reference:
+  - Liu et al., SSD: Single shot multibox detector:
+    https://arxiv.org/abs/1512.02325
+  - Implementation from TF Object Detection API:
+    https://github.com/tensorflow/models/
 """
+from collections.abc import Sequence
 import inspect
 import math
-from typing import Any, List, Optional, Tuple, Union
+from typing import Any, Iterable, List, Optional, Tuple, Union
 
 import numpy as np
 import tensorflow as tf, tf_keras
 
+from official.vision.configs import common as configs
+from official.vision.ops import box_ops
 
 # This signifies the max integer that the controller RNN could predict for the
 # augmentation scheme.
@@ -184,7 +192,7 @@ def _normalize_tuple(value, n, name):
 def gaussian_filter2d(
     image: tf.Tensor,
     filter_shape: Union[List[int], Tuple[int, ...], int],
-    sigma: Union[List[float], Tuple[float], float] = 1.0,
+    sigma: Union[List[float], Tuple[float, float], float] = 1.0,
     padding: str = 'REFLECT',
     constant_values: Union[int, tf.Tensor] = 0,
     name: Optional[str] = None,
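
One note on the sigma fix: in Python's typing module, Tuple[float] denotes a tuple of exactly one float, so the old annotation did not admit a two-element per-axis sigma. A minimal illustration (the variable names below are illustrative only, not from the diff):

    from typing import List, Tuple, Union

    sigma_one: Tuple[float] = (1.0,)              # exactly one element
    sigma_pair: Tuple[float, float] = (1.0, 2.0)  # e.g. per-axis sigmas

    # The corrected union covers scalars, lists, and 2-tuples.
    Sigma = Union[List[float], Tuple[float, float], float]
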
@@ -2781,3 +2789,133 @@ class MixupAndCutmix:
     labels = lam * labels_1 + (1. - lam) * labels_2
 
     return images, labels
+
+
+def filter_boxes_by_ioa(
+    bboxes: tf.Tensor, crop_box: tf.Tensor, min_box_overlap: float
+) -> tf.Tensor:
+  """Filter boxes by intersection over area (IOA).
+
+  The boxes with IOA less than min_box_overlap will be replaced by
+  (0, 0, 0, 0) so they can be filtered out later.
+
+  Args:
+    bboxes: a float tensor of shape [N, 4] representing normalized bounding box
+      coordinates.
+    crop_box: a float tensor of shape [1, 1, 4] representing the normalized
+      crop box.
+    min_box_overlap: minimum overlap of the box with the crop box to keep the
+      box.
+
+  Returns:
+    a tensor of shape [N, 4] with filtered box coordinates replaced by 0.
+  """
+  ioas = box_ops.bbox_intersection_over_area(bboxes[None, ...], crop_box)[0]
+  keep_boxes = ioas >= min_box_overlap
+  # Set coordinates to (0, 0, 0, 0) for filtered boxes.
+  return bboxes * tf.cast(keep_boxes, dtype=bboxes.dtype)
+
+
+def crop_normalized_boxes(
+    bboxes: tf.Tensor,
+    ori_image_size: tf.Tensor,
+    new_image_size: tf.Tensor,
+    offset: tf.Tensor,
+) -> tf.Tensor:
+  """Crop normalized boxes.
+
+  Args:
+    bboxes: a float tensor of shape [N, 4] representing normalized box
+      coordinates.
+    ori_image_size: an int tensor of shape [2] representing the original image
+      size.
+    new_image_size: an int tensor of shape [2] representing the cropped image
+      size.
+    offset: an int tensor of shape [2] representing the offset of the crop.
+
+  Returns:
+    a tensor of shape [N, 4] representing the new normalized bounding box
+    coordinates in the new cropped image.
+  """
+  new_bboxes = box_ops.denormalize_boxes(bboxes, ori_image_size)
+  new_bboxes -= tf.tile(tf.cast(offset, dtype=tf.float32), [2])[None, ...]
+  new_bboxes = box_ops.normalize_boxes(new_bboxes, new_image_size)
+  return tf.clip_by_value(new_bboxes, 0.0, 1.0)
+
+
+class SSDRandomCrop(ImageAugment):
+  """Random crop preprocessing as in the SSD paper.
+
+  Liu et al., SSD: Single shot multibox detector
+  https://arxiv.org/abs/1512.02325.
+
+  The implementation originated from TF Object Detection API:
+  https://github.com/tensorflow/models/blob/f36581036d3346a9496de06c8fd678d23cfe2103/research/object_detection/core/preprocessor.py#L3529
+  """
+
+  def __init__(
+      self,
+      params: Sequence[configs.SSDRandomCropParam] | None = None,
+      aspect_ratio_range: tuple[float, float] = (0.5, 2.0),
+      area_range: tuple[float, float] = (0.1, 1.0),
+  ):
+    """Apply random crop to the image as in the SSD paper.
+
+    The SSD random crop will randomly select one set of the parameters.
+
+    Args:
+      params: a sequence of SSDRandomCropParam that contains:
+        min_object_covered - a float; the cropped image must cover at least
+          this fraction of the area of at least one of the input bounding
+          boxes.
+        min_box_overlap - a float; the minimum overlap of the bounding box
+          with the cropped image required to keep the box.
+        prob_to_apply - a float representing the probability to crop.
+      aspect_ratio_range: allowed range for aspect ratio of the cropped image.
+      area_range: allowed range for area ratio between cropped image and the
+        original image.
+    """
+    if params is None:
+      params = configs.SSDRandomCrop().ssd_random_crop_params
+    self.num_cases = len(params)
+    self.min_object_covered = tf.constant(
+        [param.min_object_covered for param in params], dtype=tf.float32,
+    )
+    self.min_box_overlap = tf.constant(
+        [param.min_box_overlap for param in params], dtype=tf.float32,
+    )
+    self.prob_to_apply = tf.constant(
+        [param.prob_to_apply for param in params], dtype=tf.float32,
+    )
+    self.aspect_ratio_range = aspect_ratio_range
+    self.area_range = area_range
+
+  def distort_with_boxes(
+      self, image: tf.Tensor, bboxes: tf.Tensor
+  ) -> tuple[tf.Tensor, tf.Tensor]:
+    """See base class."""
+    i_params = tf.random.uniform([], maxval=self.num_cases, dtype=tf.int32)
+
+    if tf.random.uniform(shape=[], maxval=1.0) > self.prob_to_apply[i_params]:
+      return image, bboxes
+
+    image_size = tf.shape(image)
+    bboxes = tf.clip_by_value(bboxes, 0., 1.)
+    offset, new_image_size, crop_box = tf.image.sample_distorted_bounding_box(
+        image_size=image_size,
+        bounding_boxes=bboxes[None, ...],
+        min_object_covered=self.min_object_covered[i_params],
+        aspect_ratio_range=self.aspect_ratio_range,
+        area_range=self.area_range,
+        max_attempts=100,
+        use_image_if_no_bounding_boxes=True,
+    )
+    new_image = tf.slice(image, offset, new_image_size)
+
+    new_bboxes = filter_boxes_by_ioa(
+        bboxes, crop_box, self.min_box_overlap[i_params]
+    )
+    new_bboxes = crop_normalized_boxes(
+        new_bboxes, image_size[:2], new_image_size[:2], offset[:2]
+    )
+    return new_image, new_bboxes
official/vision/ops/augment_test.py CHANGED
@@ -19,11 +19,13 @@ from __future__ import division
 from __future__ import print_function
 
 import random
+from unittest import mock
 from absl.testing import parameterized
 
 import numpy as np
 import tensorflow as tf, tf_keras
 
+from official.vision.configs import common as configs
 from official.vision.ops import augment
 
 
@@ -520,5 +522,176 @@ class MixupAndCutmixTest(tf.test.TestCase, parameterized.TestCase):
     self.assertFalse(tf.math.reduce_all(images == aug_images))
 
 
+class SSDRandomCropTest(tf.test.TestCase, parameterized.TestCase):
+
+  @parameterized.named_parameters(
+      dict(
+          testcase_name='filter first one',
+          bboxes=[[0, 0, 1, 1], [0, 0, 0.5, 0.5]],
+          crop_box=[[[0, 0, 0.5, 1]]],
+          min_box_overlap=0.6,
+          expected=[[0, 0, 0, 0], [0, 0, 0.5, 0.5]],
+      ),
+      dict(
+          testcase_name='empty box list',
+          bboxes=tf.zeros([0, 4], dtype=tf.float32),
+          crop_box=[[[0, 0, 1, 1]]],
+          min_box_overlap=0.5,
+          expected=tf.zeros([0, 4], dtype=tf.float32),
+      ),
+  )
+  def test_filter_boxes_by_ioa(
+      self, bboxes, crop_box, min_box_overlap, expected
+  ):
+    new_bboxes = augment.filter_boxes_by_ioa(
+        bboxes=tf.constant(bboxes, dtype=tf.float32),
+        crop_box=tf.constant(crop_box, dtype=tf.float32),
+        min_box_overlap=min_box_overlap,
+    )
+    self.assertAllClose(expected, new_bboxes)
+
+  @parameterized.named_parameters(
+      dict(
+          testcase_name='whole image and box',
+          bboxes=[[0, 0, 1, 1], [0.1, 0.2, 0.8, 0.5]],
+          ori_image_size=[200, 600],
+          new_image_size=[100, 200],
+          offset=[70, 100],
+          expected=[[0, 0, 1, 1], [0, 0.1, 0.9, 1]],
+      ),
+      dict(
+          testcase_name='zero size boxes',
+          bboxes=tf.zeros([1, 4], dtype=tf.float32),
+          ori_image_size=[200, 600],
+          new_image_size=[100, 200],
+          offset=[70, 100],
+          expected=tf.zeros([1, 4], dtype=tf.float32),
+      ),
+      dict(
+          testcase_name='empty box list',
+          bboxes=tf.zeros([0, 4], dtype=tf.float32),
+          ori_image_size=[200, 600],
+          new_image_size=[100, 200],
+          offset=[70, 100],
+          expected=tf.zeros([0, 4], dtype=tf.float32),
+      ),
+  )
+  def test_crop_normalized_boxes(
+      self, bboxes, ori_image_size, new_image_size, offset, expected
+  ):
+    got = augment.crop_normalized_boxes(
+        bboxes=tf.constant(bboxes, dtype=tf.float32),
+        ori_image_size=tf.constant(ori_image_size, dtype=tf.int32),
+        new_image_size=tf.constant(new_image_size, dtype=tf.int32),
+        offset=tf.constant(offset, dtype=tf.int32),
+    )
+    self.assertAllClose(expected, got)
+
+  @parameterized.named_parameters(
+      dict(
+          testcase_name='uint8 image',
+          image=tf.zeros([320, 256, 3], dtype=tf.uint8),
+      ),
+      dict(
+          testcase_name='float32 image',
+          image=tf.zeros([320, 256, 3], dtype=tf.float32),
+      ),
+  )
+  def test_distort_with_boxes_output_shape(self, image):
+    bboxes = tf.constant([[0, 0, 0.5, 0.5], [0.5, 0.5, 1.0, 1.0]])
+    augmenter = augment.SSDRandomCrop()
+    new_image, new_bboxes = augmenter.distort_with_boxes(
+        image=image,
+        bboxes=bboxes,
+    )
+    self.assertDTypeEqual(new_image, image.dtype)
+    self.assertDTypeEqual(new_bboxes, bboxes.dtype)
+    self.assertShapeEqual(new_bboxes, bboxes)
+    self.assertAllGreaterEqual(new_bboxes, 0)
+    self.assertAllLessEqual(new_bboxes, 1)
+
+  def test_distort_with_empty_bboxes(self):
+    image = tf.zeros([320, 256, 3], dtype=tf.uint8)
+    bboxes = tf.zeros([0, 4], dtype=tf.float32)
+    augmenter = augment.SSDRandomCrop()
+    new_image, new_bboxes = augmenter.distort_with_boxes(
+        image=image,
+        bboxes=bboxes,
+    )
+    self.assertDTypeEqual(new_image, image.dtype)
+    self.assertDTypeEqual(new_bboxes, bboxes.dtype)
+    self.assertShapeEqual(new_bboxes, bboxes)
+
+  @parameterized.named_parameters(
+      dict(
+          testcase_name='uint8 image',
+          image=tf.zeros([320, 256, 3], dtype=tf.uint8),
+      ),
+      dict(
+          testcase_name='float32 image',
+          image=tf.zeros([320, 256, 3], dtype=tf.float32),
+      ),
+  )
+  def test_distort_with_boxes_run_as_tf_function(self, image):
+    bboxes = tf.constant([[0, 0, 0.5, 0.5], [0.5, 0.5, 1.0, 1.0]])
+    augmenter = augment.SSDRandomCrop()
+    aug_function = tf.function(augmenter.distort_with_boxes)
+    new_image, new_bboxes = aug_function(image=image, bboxes=bboxes)
+    self.assertDTypeEqual(new_image, image.dtype)
+    self.assertDTypeEqual(new_bboxes, bboxes.dtype)
+    self.assertShapeEqual(new_bboxes, bboxes)
+    self.assertAllGreaterEqual(new_bboxes, 0)
+    self.assertAllLessEqual(new_bboxes, 1)
+
+  def test_distort_with_boxes_run_as_tf_function_empty_bboxes(self):
+    image = tf.zeros([320, 256, 3], dtype=tf.uint8)
+    bboxes = tf.zeros([0, 4], dtype=tf.float32)
+    augmenter = augment.SSDRandomCrop()
+    aug_function = tf.function(augmenter.distort_with_boxes)
+    new_image, new_bboxes = aug_function(image=image, bboxes=bboxes)
+    self.assertDTypeEqual(new_image, image.dtype)
+    self.assertDTypeEqual(new_bboxes, bboxes.dtype)
+    self.assertShapeEqual(new_bboxes, bboxes)
+
+  def test_distort_with_boxes_filter_and_crop(self):
+    augmenter = augment.SSDRandomCrop(
+        params=[
+            configs.SSDRandomCropParam(
+                min_object_covered=0.0,
+                min_box_overlap=0.5,
+                prob_to_apply=1.0,
+            )
+        ],
+    )
+    image = tf.zeros([320, 256, 3], dtype=tf.uint8)
+    bboxes = tf.constant(
+        [
+            [0., 0., 1., 1.],  # filtered by low box overlap
+            [0.25, 0.75, 0.5, 2.],  # kept with box clipped
+            [0.25, 0.48, 0.5, 0.75],  # kept with box clipped
+        ],
+        dtype=tf.float32,
+    )
+    with mock.patch.object(
+        tf.image, 'sample_distorted_bounding_box', autospec=True
+    ) as mock_sample_box:
+      # crop box is an upper right box
+      offset = tf.constant([0, 128, 0], dtype=tf.int32)
+      new_image_size = tf.constant([160, 128, -1], dtype=tf.int32)
+      crop_box = tf.constant([[[0, 0.5, 0.5, 1.0]]], dtype=tf.float32)
+      mock_sample_box.return_value = offset, new_image_size, crop_box
+      new_image, new_bboxes = augmenter.distort_with_boxes(
+          image=image,
+          bboxes=bboxes,
+      )
+      self.assertAllClose(tf.zeros([160, 128, 3], dtype=tf.uint8), new_image)
+      self.assertAllClose(
+          tf.constant(
+              [[0., 0., 0., 0.], [0.5, 0.5, 1., 1.], [0.5, 0., 1., 0.5]],
+              dtype=tf.float32,
+          ),
+          new_bboxes,
+      )
+
 if __name__ == '__main__':
   tf.test.main()
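
The expected values in the mocked test can be checked by hand: the first box is zeroed because its intersection over area with the crop box is 0.25, below min_box_overlap=0.5, while the kept boxes follow the denormalize, shift, renormalize, clip sequence of crop_normalized_boxes. A standalone check of the second box, using only values from the test:

    # Image is 320x256 (h, w); the mocked crop keeps the upper-right region:
    # offset (0, 128), cropped size (160, 128).
    ymin, xmin, ymax, xmax = 0.25, 0.75, 0.5, 1.0  # second box after clipping

    # Denormalize against the original image size.
    box_px = (ymin * 320, xmin * 256, ymax * 320, xmax * 256)  # (80, 192, 160, 256)

    # Shift by the crop offset, then renormalize by the cropped size.
    shifted = (box_px[0] - 0, box_px[1] - 128, box_px[2] - 0, box_px[3] - 128)
    new_box = (shifted[0] / 160, shifted[1] / 128, shifted[2] / 160, shifted[3] / 128)

    assert new_box == (0.5, 0.5, 1.0, 1.0)  # matches the expected tensor
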
{tf_models_nightly-2.15.0.dev20240208.dist-info → tf_models_nightly-2.15.0.dev20240210.dist-info}/RECORD RENAMED
@@ -912,7 +912,7 @@ official/vision/train_spatial_partitioning.py,sha256=xpEusyM-fEPVGyBuzlkRgsYGaPA
 official/vision/configs/__init__.py,sha256=-iKVbGCvFMGSHMC89utzGXvZ83BhW6JnbEfS38lbW3M,1045
 official/vision/configs/backbones.py,sha256=-jVezph39bV2lQHRxI4Eu1Q0aj00epn417AgObKNoAM,5547
 official/vision/configs/backbones_3d.py,sha256=0lJsUzeYmuC5xiosOwrqlmgR1gkOa4tpSaxDbYYU7FE,3614
-official/vision/configs/common.py,sha256=
+official/vision/configs/common.py,sha256=BCWBuIrVJ1QhgNZwRIUB6zY3s9ZlWBapqO9meUqeeZU,6420
 official/vision/configs/decoders.py,sha256=_wG6MH1RzYuhMrvJu5menR7gDvklEXDSxlHJwzVF6H4,2080
 official/vision/configs/image_classification.py,sha256=JiJUYKI82NtlNXEjATF0JzSmheQr7ywrfk7lpgV4xz8,24563
 official/vision/configs/image_classification_test.py,sha256=cdlkY5fPaqNZUc4-A2OOEUS1v3T-ekxqUc0Vm4IknBI,1867
@@ -942,7 +942,7 @@ official/vision/dataloaders/input_reader.py,sha256=CHojw8PJKf74jl8Q3rtH2ylwhmTYg
 official/vision/dataloaders/input_reader_factory.py,sha256=WpvSA8qyqAo3wkmme4WqXpICBVg0SuR6_nNWHZ0ECM0,1623
 official/vision/dataloaders/maskrcnn_input.py,sha256=iCc08yYD-7mvIPojgBjm_nSvoQACXWCIeZNZN8CfXSs,16822
 official/vision/dataloaders/parser.py,sha256=nMXnhigMa_ascSJ2OK88xi4HdE9xvfL3G4oMrHau-t4,2315
-official/vision/dataloaders/retinanet_input.py,sha256=
+official/vision/dataloaders/retinanet_input.py,sha256=JOJQ-2rC55hkrPwt1nBE-nWmr-CER4nbaOTqGRMjsSY,17729
 official/vision/dataloaders/segmentation_input.py,sha256=EV5mVYyDzmNefGtHTUw7tecMGpajTnyrfY7nV_zugnY,11838
 official/vision/dataloaders/tf_example_decoder.py,sha256=9yCT6uSLMpmw50w7zdaRR_BXy6vIvliLZntrYAgzD18,8647
 official/vision/dataloaders/tf_example_decoder_test.py,sha256=PHxneXHn5-eIMdmk1uI4IPLa178kTCifa4EF53ik2Jo,12629
@@ -1060,8 +1060,8 @@ official/vision/ops/anchor.py,sha256=ruGKslsQid6A0yfiJuG6sf_JEbJuHHpWL77D9s3SNx4
 official/vision/ops/anchor_generator.py,sha256=293RH42qBk8zMtfiJovtBJ6Hn9yi4Ln-Az-sGXNAOR0,7274
 official/vision/ops/anchor_generator_test.py,sha256=grVbHuxlu1W7xbjMErV3q0ARxCesHN6q-7LvLcCi-_4,5296
 official/vision/ops/anchor_test.py,sha256=OQPiWHNUkQi3KaMWzOYd9HuqJa9vQ6Za4_6tf79X0qY,7633
-official/vision/ops/augment.py,sha256=
-official/vision/ops/augment_test.py,sha256=
+official/vision/ops/augment.py,sha256=cL_g4gHRabtZrEXyDUyjBD-1d0JBib2Wilf2AOTlub0,108239
+official/vision/ops/augment_test.py,sha256=R4A5UZwVc_LCTgLCoTRf25BS8v21kATjUMAFrrc-g28,25711
 official/vision/ops/box_matcher.py,sha256=AvZd7CUUZnT4FwETLyVz3Uxb3gO-o94OwlZrvm7CtX0,9067
 official/vision/ops/box_matcher_test.py,sha256=YdxaTITaUyKBA27Pss5MZKF2ibBfSu879222c7lKngU,2438
 official/vision/ops/box_ops.py,sha256=FRKLYzVYd3zFqEYX6Ql4Pr39csvR2Vc-OQv-WxhegFM,34618
@@ -1158,9 +1158,9 @@ tensorflow_models/__init__.py,sha256=etxw45SHxuwFCRX5qGxGMP83II0JfJulzNl5GSNJvhw
 tensorflow_models/tensorflow_models_test.py,sha256=AxUYUdiQn416UR7jg0h6rmv688esvlKDfpyDCIQkF18,1395
 tensorflow_models/nlp/__init__.py,sha256=4tA5Pf4qaFwT-fIFOpX7x7FHJpnyJT-5UgOeFYTyMlc,807
 tensorflow_models/vision/__init__.py,sha256=zBorY_v5xva1uI-qxhZO3Qh-Dii-Suq6wEYh6hKHDfc,833
-tf_models_nightly-2.15.0.
-tf_models_nightly-2.15.0.
-tf_models_nightly-2.15.0.
-tf_models_nightly-2.15.0.
-tf_models_nightly-2.15.0.
-tf_models_nightly-2.15.0.
+tf_models_nightly-2.15.0.dev20240210.dist-info/AUTHORS,sha256=1dG3fXVu9jlo7bul8xuix5F5vOnczMk7_yWn4y70uw0,337
+tf_models_nightly-2.15.0.dev20240210.dist-info/LICENSE,sha256=WxeBS_DejPZQabxtfMOM_xn8qoZNJDQjrT7z2wG1I4U,11512
+tf_models_nightly-2.15.0.dev20240210.dist-info/METADATA,sha256=2q87qPU9_0-DLQjkqVlZmKZ8K6B4tH1ZRbOOSCAD_ek,1432
+tf_models_nightly-2.15.0.dev20240210.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110
+tf_models_nightly-2.15.0.dev20240210.dist-info/top_level.txt,sha256=gum2FfO5R4cvjl2-QtP-S1aNmsvIZaFFT6VFzU0f4-g,33
+tf_models_nightly-2.15.0.dev20240210.dist-info/RECORD,,

{tf_models_nightly-2.15.0.dev20240208.dist-info → tf_models_nightly-2.15.0.dev20240210.dist-info}/AUTHORS RENAMED
File without changes

{tf_models_nightly-2.15.0.dev20240208.dist-info → tf_models_nightly-2.15.0.dev20240210.dist-info}/LICENSE RENAMED
File without changes

{tf_models_nightly-2.15.0.dev20240208.dist-info → tf_models_nightly-2.15.0.dev20240210.dist-info}/WHEEL RENAMED
File without changes

{tf_models_nightly-2.15.0.dev20240208.dist-info → tf_models_nightly-2.15.0.dev20240210.dist-info}/top_level.txt RENAMED
File without changes