tf-models-nightly 2.17.0.dev20240313__py2.py3-none-any.whl → 2.17.0.dev20240320__py2.py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- official/nlp/modeling/layers/__init__.py +1 -0
- official/vision/configs/semantic_segmentation.py +4 -0
- official/vision/dataloaders/segmentation_input.py +117 -54
- official/vision/ops/anchor.py +110 -76
- official/vision/ops/anchor_generator.py +1 -1
- official/vision/ops/anchor_generator_test.py +3 -3
- official/vision/ops/anchor_test.py +55 -25
- official/vision/ops/preprocess_ops.py +38 -7
- official/vision/ops/preprocess_ops_test.py +40 -0
- official/vision/tasks/semantic_segmentation.py +2 -1
- {tf_models_nightly-2.17.0.dev20240313.dist-info → tf_models_nightly-2.17.0.dev20240320.dist-info}/METADATA +1 -1
- {tf_models_nightly-2.17.0.dev20240313.dist-info → tf_models_nightly-2.17.0.dev20240320.dist-info}/RECORD +16 -16
- {tf_models_nightly-2.17.0.dev20240313.dist-info → tf_models_nightly-2.17.0.dev20240320.dist-info}/AUTHORS +0 -0
- {tf_models_nightly-2.17.0.dev20240313.dist-info → tf_models_nightly-2.17.0.dev20240320.dist-info}/LICENSE +0 -0
- {tf_models_nightly-2.17.0.dev20240313.dist-info → tf_models_nightly-2.17.0.dev20240320.dist-info}/WHEEL +0 -0
- {tf_models_nightly-2.17.0.dev20240313.dist-info → tf_models_nightly-2.17.0.dev20240320.dist-info}/top_level.txt +0 -0
official/nlp/modeling/layers/__init__.py
CHANGED
@@ -46,6 +46,7 @@ from official.nlp.modeling.layers.moe import MoeLayerWithBackbone
 from official.nlp.modeling.layers.multi_channel_attention import *
 from official.nlp.modeling.layers.on_device_embedding import OnDeviceEmbedding
 from official.nlp.modeling.layers.pack_optimization import PackBertEmbeddings
+from official.nlp.modeling.layers.pack_optimization import StridedReZeroTransformer
 from official.nlp.modeling.layers.pack_optimization import StridedTransformerEncoderBlock
 from official.nlp.modeling.layers.pack_optimization import StridedTransformerScaffold
 from official.nlp.modeling.layers.per_dim_scale_attention import PerDimScaleAttention
official/vision/configs/semantic_segmentation.py
CHANGED
@@ -91,6 +91,10 @@ class DataConfig(cfg.DataConfig):
   )
   additional_dense_features: List[DenseFeatureConfig] = dataclasses.field(
       default_factory=list)
+  # If `centered_crop` is set to True, then resized crop
+  # (if smaller than padded size) is place in the center of the image.
+  # Default behaviour is to place it at left top corner.
+  centered_crop: bool = False


 @dataclasses.dataclass
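A minimal sketch of how the new flag can be set when building the data config. Hedged: only `centered_crop` comes from this diff; the surrounding field names (`input_path`, `is_training`, `output_size`, `resize_eval_groundtruth`) are assumed from the existing `DataConfig` / `cfg.DataConfig` definitions and the path is a placeholder.

```python
# Hedged sketch: enabling the new centered-crop behaviour on a DataConfig.
from official.vision.configs import semantic_segmentation as seg_cfg

eval_data = seg_cfg.DataConfig(
    input_path='/path/to/val*.tfrecord',  # hypothetical placeholder path
    is_training=False,
    output_size=[512, 512],
    resize_eval_groundtruth=True,  # required by the parser when centered_crop=True
    centered_crop=True,            # place the resized crop in the image center
)
```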
official/vision/dataloaders/segmentation_input.py
CHANGED
@@ -25,48 +25,54 @@ from official.vision.ops import preprocess_ops
 class Decoder(decoder.Decoder):
   """A tf.Example decoder for segmentation task."""

-  def __init__(
-
-
+  def __init__(
+      self,
+      image_feature=config_lib.DenseFeatureConfig(),
+      additional_dense_features=None,
+  ):
     self._keys_to_features = {
-        'image/encoded':
-
-        'image/
-
-
-
-
-
-
-            tf.io.FixedLenFeature((), tf.string, default_value='')
+        'image/encoded': tf.io.FixedLenFeature((), tf.string, default_value=''),
+        'image/height': tf.io.FixedLenFeature((), tf.int64, default_value=0),
+        'image/width': tf.io.FixedLenFeature((), tf.int64, default_value=0),
+        'image/segmentation/class/encoded': tf.io.FixedLenFeature(
+            (), tf.string, default_value=''
+        ),
+        image_feature.feature_name: tf.io.FixedLenFeature(
+            (), tf.string, default_value=''
+        ),
     }
     if additional_dense_features:
       for feature in additional_dense_features:
         self._keys_to_features[feature.feature_name] = tf.io.FixedLenFeature(
-            (), tf.string, default_value=''
+            (), tf.string, default_value=''
+        )

   def decode(self, serialized_example):
-    return tf.io.parse_single_example(
-
+    return tf.io.parse_single_example(
+        serialized_example, self._keys_to_features
+    )


 class Parser(parser.Parser):
   """Parser to parse an image and its annotations into a dictionary of tensors."""

-  def __init__(
-
-
-
-
-
-
-
-
-
-
-
-
-
+  def __init__(
+      self,
+      output_size,
+      crop_size=None,
+      resize_eval_groundtruth=True,
+      gt_is_matting_map=False,
+      groundtruth_padded_size=None,
+      ignore_label=255,
+      aug_rand_hflip=False,
+      preserve_aspect_ratio=True,
+      aug_scale_min=1.0,
+      aug_scale_max=1.0,
+      dtype='float32',
+      image_feature=config_lib.DenseFeatureConfig(),
+      additional_dense_features=None,
+      centered_crop=False,
+  ):
     """Initializes parameters for parsing annotations in the dataset.

     Args:
@@ -100,13 +106,18 @@ class Parser(parser.Parser):
         dataset mean/stddev.
       additional_dense_features: `list` of DenseFeatureConfig for additional
         dense features.
+      centered_crop: If `centered_crop` is set to True, then resized crop (if
+        smaller than padded size) is place in the center of the image. Default
+        behaviour is to place it at left top corner.
     """
     self._output_size = output_size
     self._crop_size = crop_size
     self._resize_eval_groundtruth = resize_eval_groundtruth
     if (not resize_eval_groundtruth) and (groundtruth_padded_size is None):
-      raise ValueError(
-
+      raise ValueError(
+          'groundtruth_padded_size ([height, width]) needs to be'
+          'specified when resize_eval_groundtruth is False.'
+      )
     self._gt_is_matting_map = gt_is_matting_map
     self._groundtruth_padded_size = groundtruth_padded_size
     self._ignore_label = ignore_label
@@ -122,6 +133,12 @@ class Parser(parser.Parser):

     self._image_feature = image_feature
     self._additional_dense_features = additional_dense_features
+    self._centered_crop = centered_crop
+    if self._centered_crop and not self._resize_eval_groundtruth:
+      raise ValueError(
+          'centered_crop is only supported when resize_eval_groundtruth is'
+          ' True.'
+      )

   def _prepare_image_and_label(self, data):
     """Prepare normalized image and label."""
@@ -129,21 +146,25 @@ class Parser(parser.Parser):
     width = data['image/width']

     label = tf.io.decode_image(
-        data['image/segmentation/class/encoded'], channels=1
+        data['image/segmentation/class/encoded'], channels=1
+    )
     label = tf.reshape(label, (1, height, width))
     label = tf.cast(label, tf.float32)

     image = tf.io.decode_image(
         data[self._image_feature.feature_name],
         channels=self._image_feature.num_channels,
-        dtype=tf.uint8
+        dtype=tf.uint8,
+    )
     image = tf.reshape(image, (height, width, self._image_feature.num_channels))
     # Normalizes the image feature with mean and std values, which are divided
     # by 255 because an uint8 image are re-scaled automatically. Images other
     # than uint8 type will be wrongly normalized.
     image = preprocess_ops.normalize_image(
-        image,
-        [
+        image,
+        [mean / 255.0 for mean in self._image_feature.mean],
+        [stddev / 255.0 for stddev in self._image_feature.stddev],
+    )

     if self._additional_dense_features:
       input_list = [image]
@@ -151,11 +172,14 @@ class Parser(parser.Parser):
         feature = tf.io.decode_image(
             data[feature_cfg.feature_name],
             channels=feature_cfg.num_channels,
-            dtype=tf.uint8
+            dtype=tf.uint8,
+        )
         feature = tf.reshape(feature, (height, width, feature_cfg.num_channels))
         feature = preprocess_ops.normalize_image(
-            feature,
-            [
+            feature,
+            [mean / 255.0 for mean in feature_cfg.mean],
+            [stddev / 255.0 for stddev in feature_cfg.stddev],
+        )
         input_list.append(feature)
       concat_input = tf.concat(input_list, axis=2)
     else:
@@ -164,7 +188,8 @@ class Parser(parser.Parser):
     if not self._preserve_aspect_ratio:
       label = tf.reshape(label, [data['image/height'], data['image/width'], 1])
       concat_input = tf.image.resize(
-          concat_input, self._output_size, method='bilinear'
+          concat_input, self._output_size, method='bilinear'
+      )
       label = tf.image.resize(label, self._output_size, method='nearest')
       label = tf.reshape(label[:, :, -1], [1] + self._output_size)

@@ -195,14 +220,16 @@ class Parser(parser.Parser):

       image_mask = tf.concat([image, label], axis=2)
       image_mask_crop = tf.image.random_crop(
-          image_mask, self._crop_size + [tf.shape(image_mask)[-1]]
+          image_mask, self._crop_size + [tf.shape(image_mask)[-1]]
+      )
       image = image_mask_crop[:, :, :-1]
       label = tf.reshape(image_mask_crop[:, :, -1], [1] + self._crop_size)

     # Flips image randomly during training.
     if self._aug_rand_hflip:
       image, _, label = preprocess_ops.random_horizontal_flip(
-          image, masks=label
+          image, masks=label
+      )

     train_image_size = self._crop_size if self._crop_size else self._output_size
     # Resizes and crops image.
@@ -211,7 +238,9 @@ class Parser(parser.Parser):
         train_image_size,
         train_image_size,
         aug_scale_min=self._aug_scale_min,
-        aug_scale_max=self._aug_scale_max
+        aug_scale_max=self._aug_scale_max,
+        centered_crop=self._centered_crop,
+    )

     # Resizes and crops boxes.
     image_scale = image_info[2, :]
@@ -221,11 +250,17 @@ class Parser(parser.Parser):
     # The label is first offset by +1 and then padded with 0.
     label += 1
     label = tf.expand_dims(label, axis=3)
-    label = preprocess_ops.resize_and_crop_masks(
-
+    label = preprocess_ops.resize_and_crop_masks(
+        label,
+        image_scale,
+        train_image_size,
+        offset,
+        centered_crop=self._centered_crop,
+    )
     label -= 1
     label = tf.where(
-        tf.equal(label, -1), self._ignore_label * tf.ones_like(label), label
+        tf.equal(label, -1), self._ignore_label * tf.ones_like(label), label
+    )
     label = tf.squeeze(label, axis=0)
     valid_mask = tf.not_equal(label, self._ignore_label)

@@ -255,30 +290,58 @@ class Parser(parser.Parser):

     # Resizes and crops image.
     image, image_info = preprocess_ops.resize_and_crop_image(
-        image,
+        image,
+        self._output_size,
+        self._output_size,
+        centered_crop=self._centered_crop,
+    )

     if self._resize_eval_groundtruth:
       # Resizes eval masks to match input image sizes. In that case, mean IoU
       # is computed on output_size not the original size of the images.
       image_scale = image_info[2, :]
       offset = image_info[3, :]
-      label = preprocess_ops.resize_and_crop_masks(
-
+      label = preprocess_ops.resize_and_crop_masks(
+          label,
+          image_scale,
+          self._output_size,
+          offset,
+          centered_crop=self._centered_crop,
+      )
     else:
-
-
-
+      if self._centered_crop:
+        label_size = tf.cast(tf.shape(label)[0:2], tf.int32)
+        label = tf.image.pad_to_bounding_box(
+            label,
+            tf.maximum(
+                (self._groundtruth_padded_size[0] - label_size[0]) // 2, 0
+            ),
+            tf.maximum(
+                (self._groundtruth_padded_size[1] - label_size[1]) // 2, 0
+            ),
+            self._groundtruth_padded_size[0],
+            self._groundtruth_padded_size[1],
+        )
+      else:
+        label = tf.image.pad_to_bounding_box(
+            label,
+            0,
+            0,
+            self._groundtruth_padded_size[0],
+            self._groundtruth_padded_size[1],
+        )

     label -= 1
     label = tf.where(
-        tf.equal(label, -1), self._ignore_label * tf.ones_like(label), label
+        tf.equal(label, -1), self._ignore_label * tf.ones_like(label), label
+    )
     label = tf.squeeze(label, axis=0)

     valid_mask = tf.not_equal(label, self._ignore_label)
     labels = {
         'masks': label,
         'valid_masks': valid_mask,
-        'image_info': image_info
+        'image_info': image_info,
     }

     # Cast image as self._dtype
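Taken together, the parser changes thread one `centered_crop` flag from the constructor into `resize_and_crop_image`, `resize_and_crop_masks`, and the ground-truth padding path, and reject the unsupported combination up front. A minimal construction sketch based only on the signature shown above (all other arguments left at their defaults; paths and pipeline wiring are out of scope here):

```python
# Hedged sketch: building the parser with the new flag. Note the guard added in
# __init__ above: centered_crop=True requires resize_eval_groundtruth=True.
from official.vision.dataloaders import segmentation_input

parser = segmentation_input.Parser(
    output_size=[512, 512],
    resize_eval_groundtruth=True,  # must stay True when centered_crop=True
    centered_crop=True,
)
```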
official/vision/ops/anchor.py
CHANGED
@@ -22,7 +22,6 @@ from typing import Dict, Optional, Tuple

 import tensorflow as tf, tf_keras

-from official.vision.ops import anchor_generator
 from official.vision.ops import box_matcher
 from official.vision.ops import iou_similarity
 from official.vision.ops import target_gather
@@ -32,7 +31,38 @@ from official.vision.utils.object_detection import faster_rcnn_box_coder


 class Anchor(object):
-  """Anchor class for anchor-based object detectors.
+  """Anchor class for anchor-based object detectors.
+
+  Example:
+  ```python
+  anchor_boxes = Anchor(
+      min_level=3,
+      max_level=4,
+      num_scales=2,
+      aspect_ratios=[0.5, 1., 2.],
+      anchor_size=4.,
+      image_size=[256, 256],
+  ).multilevel_boxes
+  ```
+
+  Attributes:
+    min_level: integer number of minimum level of the output feature pyramid.
+    max_level: integer number of maximum level of the output feature pyramid.
+    num_scales: integer number representing intermediate scales added on each
+      level. For instances, num_scales=2 adds one additional intermediate
+      anchor scales [2^0, 2^0.5] on each level.
+    aspect_ratios: list of float numbers representing the aspect ratio anchors
+      added on each level. The number indicates the ratio of width to height.
+      For instances, aspect_ratios=[1.0, 2.0, 0.5] adds three anchors on each
+      scale level.
+    anchor_size: float number representing the scale of size of the base
+      anchor to the feature stride 2^level.
+    image_size: a list of integer numbers or Tensors representing [height,
+      width] of the input image size.
+    multilevel_boxes: an OrderedDict from level to the generated anchor boxes of
+      shape [height_l, width_l, num_anchors_per_location * 4].
+    anchors_per_location: number of anchors per pixel location.
+  """

   def __init__(
       self,
@@ -43,57 +73,40 @@ class Anchor(object):
       anchor_size,
       image_size,
   ):
-    """
-
-    Args:
-      min_level: integer number of minimum level of the output feature pyramid.
-      max_level: integer number of maximum level of the output feature pyramid.
-      num_scales: integer number representing intermediate scales added on each
-        level. For instances, num_scales=2 adds one additional intermediate
-        anchor scales [2^0, 2^0.5] on each level.
-      aspect_ratios: list of float numbers representing the aspect ratio anchors
-        added on each level. The number indicates the ratio of width to height.
-        For instances, aspect_ratios=[1.0, 2.0, 0.5] adds three anchors on each
-        scale level.
-      anchor_size: float number representing the scale of size of the base
-        anchor to the feature stride 2^level.
-      image_size: a list of integer numbers or Tensors representing [height,
-        width] of the input image size.The image_size should be divided by the
-        largest feature stride 2^max_level.
-    """
+    """Initializes the instance."""
     self.min_level = min_level
     self.max_level = max_level
     self.num_scales = num_scales
     self.aspect_ratios = aspect_ratios
     self.anchor_size = anchor_size
     self.image_size = image_size
-    self.
+    self.multilevel_boxes = self._generate_multilevel_boxes()

-  def
+  def _generate_multilevel_boxes(self) -> Dict[str, tf.Tensor]:
     """Generates multi-scale anchor boxes.

     Returns:
-
-
+      An OrderedDict from level to anchor boxes of shape [height_l, width_l,
+      num_anchors_per_location * 4].
     """
-
+    multilevel_boxes = collections.OrderedDict()
     for level in range(self.min_level, self.max_level + 1):
       boxes_l = []
-
-
+      feat_size_y = math.ceil(self.image_size[0] / 2**level)
+      feat_size_x = math.ceil(self.image_size[1] / 2**level)
+      stride_y = tf.cast(self.image_size[0] / feat_size_y, tf.float32)
+      stride_x = tf.cast(self.image_size[1] / feat_size_x, tf.float32)
+      x = tf.range(stride_x / 2, self.image_size[1], stride_x)
+      y = tf.range(stride_y / 2, self.image_size[0], stride_y)
+      xv, yv = tf.meshgrid(x, y)
       for scale in range(self.num_scales):
         for aspect_ratio in self.aspect_ratios:
-          intermidate_scale = 2 ** (scale /
-          base_anchor_size = self.anchor_size *
+          intermidate_scale = 2 ** (scale / self.num_scales)
+          base_anchor_size = self.anchor_size * 2**level * intermidate_scale
           aspect_x = aspect_ratio**0.5
           aspect_y = aspect_ratio**-0.5
           half_anchor_size_x = base_anchor_size * aspect_x / 2.0
           half_anchor_size_y = base_anchor_size * aspect_y / 2.0
-          x = tf.range(stride / 2, self.image_size[1], stride)
-          y = tf.range(stride / 2, self.image_size[0], stride)
-          xv, yv = tf.meshgrid(x, y)
-          xv = tf.cast(tf.reshape(xv, [-1]), dtype=tf.float32)
-          yv = tf.cast(tf.reshape(yv, [-1]), dtype=tf.float32)
           # Tensor shape Nx4.
           boxes = tf.stack(
               [
@@ -102,41 +115,18 @@ class Anchor(object):
                   yv + half_anchor_size_y,
                   xv + half_anchor_size_x,
               ],
-              axis
+              axis=-1,
           )
           boxes_l.append(boxes)
-      # Concat anchors on the same level to tensor shape
-      boxes_l = tf.
-
-
-    return tf.concat(boxes_all, axis=0)
-
-  def unpack_labels(self, labels: tf.Tensor) -> Dict[str, tf.Tensor]:
-    """Unpacks an array of labels into multi-scales labels."""
-    unpacked_labels = collections.OrderedDict()
-    count = 0
-    for level in range(self.min_level, self.max_level + 1):
-      feat_size_y = tf.cast(
-          math.ceil(self.image_size[0] / 2**level), tf.int32
-      )
-      feat_size_x = tf.cast(
-          math.ceil(self.image_size[1] / 2**level), tf.int32
-      )
-      steps = feat_size_y * feat_size_x * self.anchors_per_location
-      unpacked_labels[str(level)] = tf.reshape(
-          labels[count : count + steps], [feat_size_y, feat_size_x, -1]
-      )
-      count += steps
-    return unpacked_labels
+      # Concat anchors on the same level to tensor shape HxWx(Ax4).
+      boxes_l = tf.concat(boxes_l, axis=-1)
+      multilevel_boxes[str(level)] = boxes_l
+    return multilevel_boxes

   @property
-  def anchors_per_location(self):
+  def anchors_per_location(self) -> int:
     return self.num_scales * len(self.aspect_ratios)

-  @property
-  def multilevel_boxes(self):
-    return self.unpack_labels(self.boxes)
-

 class AnchorLabeler(object):
   """Labeler for dense object detector."""
@@ -420,24 +410,68 @@ class RpnAnchorLabeler(AnchorLabeler):
     return score_targets_dict, box_targets_dict


+class AnchorGeneratorv2:
+  """Utility to generate anchors for a multiple feature maps.
+
+  Attributes:
+    min_level: integer number of minimum level of the output feature pyramid.
+    max_level: integer number of maximum level of the output feature pyramid.
+    num_scales: integer number representing intermediate scales added on each
+      level. For instances, num_scales=2 adds one additional intermediate
+      anchor scales [2^0, 2^0.5] on each level.
+    aspect_ratios: list of float numbers representing the aspect ratio anchors
+      added on each level. The number indicates the ratio of width to height.
+      For instances, aspect_ratios=[1.0, 2.0, 0.5] adds three anchors on each
+      scale level.
+    anchor_size: float number representing the scale of size of the base
+      anchor to the feature stride 2^level.
+  """
+
+  def __init__(
+      self,
+      min_level,
+      max_level,
+      num_scales,
+      aspect_ratios,
+      anchor_size,
+  ):
+    """Initializes the instance."""
+    self.min_level = min_level
+    self.max_level = max_level
+    self.num_scales = num_scales
+    self.aspect_ratios = aspect_ratios
+    self.anchor_size = anchor_size
+
+  def __call__(self, image_size):
+    """Generate multilevel anchor boxes.
+
+    Args:
+      image_size: a list of integer numbers or Tensors representing [height,
+        width] of the input image size.
+    Returns:
+      An ordered dictionary from level to anchor boxes of shape [height_l,
+      width_l, num_anchors_per_location * 4].
+    """
+    return Anchor(
+        min_level=self.min_level,
+        max_level=self.max_level,
+        num_scales=self.num_scales,
+        aspect_ratios=self.aspect_ratios,
+        anchor_size=self.anchor_size,
+        image_size=image_size,
+    ).multilevel_boxes
+
+
 def build_anchor_generator(
     min_level, max_level, num_scales, aspect_ratios, anchor_size
 ):
   """Build anchor generator from levels."""
-
-
-
-
-    scales.append(2 ** (scale / float(num_scales)))
-  for level in range(min_level, max_level + 1):
-    stride = 2**level
-    strides[str(level)] = stride
-    anchor_sizes[str(level)] = anchor_size * stride
-  anchor_gen = anchor_generator.AnchorGenerator(
-      anchor_sizes=anchor_sizes,
-      scales=scales,
+  anchor_gen = AnchorGeneratorv2(
+      min_level=min_level,
+      max_level=max_level,
+      num_scales=num_scales,
       aspect_ratios=aspect_ratios,
-
+      anchor_size=anchor_size,
   )
   return anchor_gen

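Net effect of the anchor.py refactor: `Anchor` now builds an OrderedDict of per-level boxes directly (the flattened `boxes` tensor and the `unpack_labels` round-trip are gone), and `AnchorGeneratorv2` defers the image size to call time. A short usage sketch based on the code above; the printed shapes assume eager execution and are illustrative only.

```python
# Hedged sketch: generating multilevel anchors with the refactored classes.
from official.vision.ops import anchor

generator = anchor.AnchorGeneratorv2(
    min_level=3,
    max_level=7,
    num_scales=3,
    aspect_ratios=[0.5, 1.0, 2.0],
    anchor_size=4.0,
)
multilevel_boxes = generator([256, 256])  # image_size as [height, width]
for level, boxes in multilevel_boxes.items():
  # Each value has shape [height_l, width_l, num_anchors_per_location * 4].
  print(level, boxes.shape)
```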
official/vision/ops/anchor_generator_test.py
CHANGED
@@ -77,7 +77,7 @@ class MultiScaleAnchorGeneratorTest(parameterized.TestCase, tf.test.TestCase):
     levels = range(min_level, max_level + 1)
     anchor_sizes = [2**(level + 1) for level in levels]
     strides = [2**level for level in levels]
-    anchor_gen = anchor_generator.
+    anchor_gen = anchor_generator.AnchorGeneratorv1(
         anchor_sizes=anchor_sizes,
         scales=[1.],
         aspect_ratios=aspect_ratios,
@@ -98,7 +98,7 @@ class MultiScaleAnchorGeneratorTest(parameterized.TestCase, tf.test.TestCase):
     levels = range(min_level, max_level + 1)
     anchor_sizes = [2**(level + 1) for level in levels]
     strides = [2**level for level in levels]
-    anchor_gen = anchor_generator.
+    anchor_gen = anchor_generator.AnchorGeneratorv1(
         anchor_sizes=anchor_sizes,
         scales=[1.],
         aspect_ratios=aspect_ratios,
@@ -122,7 +122,7 @@ class MultiScaleAnchorGeneratorTest(parameterized.TestCase, tf.test.TestCase):
     levels = range(min_level, max_level + 1)
     anchor_sizes = dict((str(level), 2**(level + 1)) for level in levels)
     strides = dict((str(level), 2**level) for level in levels)
-    anchor_gen = anchor_generator.
+    anchor_gen = anchor_generator.AnchorGeneratorv1(
         anchor_sizes=anchor_sizes,
         scales=[1.],
         aspect_ratios=aspect_ratios,
official/vision/ops/anchor_test.py
CHANGED
@@ -58,40 +58,52 @@ class AnchorTest(parameterized.TestCase, tf.test.TestCase):
     self.assertEqual(negatives, expected_negatives)

   @parameterized.parameters(
-      # Single scale anchor
-      (5, 5, 1, [1.0], 2.0,
-       [[-16, -16, 48, 48], [-16, 16, 48, 80],
-
-      # Multi scale anchor
-      (5, 6, 1, [1.0], 2.0,
-       [[-16, -16, 48, 48], [-16, 16, 48, 80],
-
-
-
-
-
+      # Single scale anchor
+      (5, 5, 1, [1.0], 2.0, [64, 64],
+       {'5': [[[-16, -16, 48, 48], [-16, 16, 48, 80]],
+              [[16, -16, 80, 48], [16, 16, 80, 80]]]}),
+      # Multi scale anchor
+      (5, 6, 1, [1.0], 2.0, [64, 64],
+       {'5': [[[-16, -16, 48, 48], [-16, 16, 48, 80]],
+              [[16, -16, 80, 48], [16, 16, 80, 80]]],
+        '6': [[[-32, -32, 96, 96]]]}),
+      # Multi aspect ratio anchor
+      (6, 6, 1, [1.0, 4.0, 0.25], 2.0, [64, 64],
+       {'6': [[[-32, -32, 96, 96, -0, -96, 64, 160, -96, -0, 160, 64]]]}),
+      # Intermidate scales
+      (5, 5, 2, [1.0], 1.0, [32, 32],
+       {'5': [[[0, 0, 32, 32,
+                16 - 16 * 2**0.5, 16 - 16 * 2**0.5,
+                16 + 16 * 2**0.5, 16 + 16 * 2**0.5]]]}),
+      # Non-square
+      (5, 5, 1, [1.0], 1.0, [64, 32],
+       {'5': [[[0, 0, 32, 32]],
+              [[32, 0, 64, 32]]]}),
+      # Indivisible by 2^level
+      (5, 5, 1, [1.0], 1.0, [40, 32],
+       {'5': [[[-6, 0, 26, 32]],
+              [[14, 0, 46, 32]]]}),
   )
   def testAnchorGeneration(self, min_level, max_level, num_scales,
-                           aspect_ratios, anchor_size,
-
+                           aspect_ratios, anchor_size, image_size,
+                           expected_boxes):
     anchors = anchor.Anchor(min_level, max_level, num_scales, aspect_ratios,
                             anchor_size, image_size)
-
-    self.assertEqual(expected_boxes, boxes.tolist())
+    self.assertAllClose(expected_boxes, anchors.multilevel_boxes)

   @parameterized.parameters(
       # Single scale anchor.
       (5, 5, 1, [1.0], 2.0,
-       [[-16, -16, 48, 48], [-16, 16, 48, 80],
-
+       {'5': [[[-16, -16, 48, 48], [-16, 16, 48, 80]],
+              [[16, -16, 80, 48], [16, 16, 80, 80]]]}),
       # Multi scale anchor.
       (5, 6, 1, [1.0], 2.0,
-       [[-16, -16, 48, 48], [-16, 16, 48, 80],
-
-
+       {'5': [[[-16, -16, 48, 48], [-16, 16, 48, 80]],
+              [[16, -16, 80, 48], [16, 16, 80, 80]]],
+        '6': [[[-32, -32, 96, 96]]]}),
+      # Multi aspect ratio anchor.
       (6, 6, 1, [1.0, 4.0, 0.25], 2.0,
-       [[-32, -32, 96, 96
-
+       {'6': [[[-32, -32, 96, 96, -0, -96, 64, 160, -96, -0, 160, 64]]]}),
   )
   def testAnchorGenerationWithImageSizeAsTensor(self,
                                                 min_level,
@@ -103,8 +115,25 @@ class AnchorTest(parameterized.TestCase, tf.test.TestCase):
     image_size = tf.constant([64, 64], tf.int32)
     anchors = anchor.Anchor(min_level, max_level, num_scales, aspect_ratios,
                             anchor_size, image_size)
-
-
+    self.assertAllClose(expected_boxes, anchors.multilevel_boxes)
+
+  @parameterized.parameters(
+      (6, 8, 2, [1.0, 2.0, 0.5], 3.0, [320, 256]),
+  )
+  def testAnchorGenerationAreCentered(self, min_level, max_level, num_scales,
+                                      aspect_ratios, anchor_size, image_size):
+    anchors = anchor.Anchor(min_level, max_level, num_scales, aspect_ratios,
+                            anchor_size, image_size)
+    multilevel_boxes = anchors.multilevel_boxes
+    image_size = np.array(image_size)
+    for boxes in multilevel_boxes.values():
+      boxes = boxes.numpy()
+      box_centers = boxes.mean(axis=0).mean(axis=0)
+      box_centers = [
+          (box_centers[0] + box_centers[2]) / 2,
+          (box_centers[1] + box_centers[3]) / 2,
+      ]
+      self.assertAllClose(image_size / 2, box_centers)

   @parameterized.parameters(
       (3, 6, 2, [1.0], 2.0, False),
@@ -164,6 +193,7 @@ class AnchorTest(parameterized.TestCase, tf.test.TestCase):
       (3, 7, [.5, 1., 2.], 2, 8, (256, 256)),
       (3, 8, [1.], 3, 32, (512, 512)),
       (3, 3, [1.], 2, 4, (32, 32)),
+      (4, 8, [.5, 1., 2.], 2, 3, (320, 256)),
   )
   def testEquivalentResult(self, min_level, max_level, aspect_ratios,
                            num_scales, anchor_size, image_size):
official/vision/ops/preprocess_ops.py
CHANGED
@@ -168,6 +168,7 @@ def resize_and_crop_image(
     seed=1,
     method=tf.image.ResizeMethod.BILINEAR,
     keep_aspect_ratio=True,
+    centered_crop=False,
 ):
   """Resizes the input image to output size (RetinaNet style).

@@ -195,6 +196,9 @@ def resize_and_crop_image(
     seed: seed for random scale jittering.
     method: function to resize input image to scaled image.
     keep_aspect_ratio: whether or not to keep the aspect ratio when resizing.
+    centered_crop: If `centered_crop` is set to True, then resized crop (if
+      smaller than padded size) is place in the center of the image. Default
+      behaviour is to place it at left top corner.

   Returns:
     output_image: `Tensor` of shape [height, width, 3] where [height, width]
@@ -266,9 +270,19 @@ def resize_and_crop_image(

   output_image = scaled_image
   if padded_size is not None:
-
-
-
+    if centered_crop:
+      scaled_image_size = tf.cast(tf.shape(scaled_image)[0:2], tf.int32)
+      output_image = tf.image.pad_to_bounding_box(
+          scaled_image,
+          tf.maximum((padded_size[0] - scaled_image_size[0]) // 2, 0),
+          tf.maximum((padded_size[1] - scaled_image_size[1]) // 2, 0),
+          padded_size[0],
+          padded_size[1],
+      )
+    else:
+      output_image = tf.image.pad_to_bounding_box(
+          scaled_image, 0, 0, padded_size[0], padded_size[1]
+      )

   image_info = tf.stack([
       image_size,
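To make the padding arithmetic concrete: with `centered_crop=True`, a scaled image that is smaller than `padded_size` is shifted by half of the leftover space in each dimension (clamped at zero) instead of sitting at the top-left corner. A small worked sketch with assumed sizes (a 128x256 scaled image and a 256x256 padded size), not part of the diff:

```python
# Hedged sketch of the offsets computed above. For a 128x256 scaled image and
# padded_size = [256, 256]:
#   offset_y = max((256 - 128) // 2, 0) = 64
#   offset_x = max((256 - 256) // 2, 0) = 0
# so the image occupies rows 64..191 of the padded canvas instead of rows 0..127.
import tensorflow as tf

scaled_image = tf.zeros([128, 256, 3])
padded_size = [256, 256]
scaled_size = tf.cast(tf.shape(scaled_image)[0:2], tf.int32)
output_image = tf.image.pad_to_bounding_box(
    scaled_image,
    tf.maximum((padded_size[0] - scaled_size[0]) // 2, 0),
    tf.maximum((padded_size[1] - scaled_size[1]) // 2, 0),
    padded_size[0],
    padded_size[1],
)
print(output_image.shape)  # (256, 256, 3)
```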
@@ -686,7 +700,9 @@ def resize_and_crop_boxes(boxes, image_scale, output_size, offset):
   return boxes


-def resize_and_crop_masks(
+def resize_and_crop_masks(
+    masks, image_scale, output_size, offset, centered_crop: bool = False
+):
   """Resizes boxes to output size with scale and offset.

   Args:
@@ -697,6 +713,9 @@ def resize_and_crop_masks(masks, image_scale, output_size, offset):
       output image size.
     offset: 2D `Tensor` representing top-left corner [y0, x0] to crop scaled
       boxes.
+    centered_crop: If `centered_crop` is set to True, then resized crop (if
+      smaller than padded size) is place in the center of the image. Default
+      behaviour is to place it at left top corner.

   Returns:
     masks: `Tensor` of shape [N, H, W, C] representing the scaled masks.
@@ -719,6 +738,7 @@ def resize_and_crop_masks(masks, image_scale, output_size, offset):
   scaled_masks = tf.image.resize(
       masks, scaled_size, method=tf.image.ResizeMethod.NEAREST_NEIGHBOR
   )
+
   offset = tf.cast(offset, tf.int32)
   scaled_masks = scaled_masks[
       :,
@@ -727,9 +747,20 @@ def resize_and_crop_masks(masks, image_scale, output_size, offset):
       :,
   ]

-
-
-
+  if centered_crop:
+    scaled_mask_size = tf.cast(tf.shape(scaled_masks)[1:3], tf.int32)
+    output_masks = tf.image.pad_to_bounding_box(
+        scaled_masks,
+        tf.maximum((output_size[0] - scaled_mask_size[0]) // 2, 0),
+        tf.maximum((output_size[1] - scaled_mask_size[1]) // 2, 0),
+        output_size[0],
+        output_size[1],
+    )
+  else:
+    output_masks = tf.image.pad_to_bounding_box(
+        scaled_masks, 0, 0, output_size[0], output_size[1]
+    )
+
   # Remove padding.
   output_masks = output_masks[1::]
   return output_masks
official/vision/ops/preprocess_ops_test.py
CHANGED
@@ -482,6 +482,46 @@ class InputUtilsTest(parameterized.TestCase, tf.test.TestCase):
     self.assertShapeEqual(input_image, aug_image)
     self.assertDTypeEqual(aug_image, np.uint8)

+  @parameterized.parameters(0.25, 0.5, 0.75, 1, 1.25, 1.5)
+  def test_resize_and_crop_image_and_masks(self, scale):
+    image = tf.convert_to_tensor(np.random.rand(1024, 2048, 3))
+    label = tf.convert_to_tensor(np.ones((1, 1024, 2048, 1), dtype=np.int32))
+    image, image_info = preprocess_ops.resize_and_crop_image(
+        image, (256, 256), (256, 256), scale, scale, centered_crop=True
+    )
+    image_scale = image_info[2, :]
+    offset = image_info[3, :]
+    label = preprocess_ops.resize_and_crop_masks(
+        label, image_scale, (256, 256), offset, centered_crop=True
+    )
+    self.assertEqual(image.shape[0:2], label.shape[1:3])
+    image_arr = image.numpy()
+    label_arr = np.squeeze(label.numpy())
+
+    scaled_height = round(1024 * 256 * scale / 2048)
+    scaled_width = round(2048 * 256 * scale / 2048)
+    height_offset = max((256 - scaled_height) // 2, 0)
+    width_offset = max((256 - scaled_width) // 2, 0)
+
+    self.assertEqual(
+        label_arr[
+            height_offset : 256 - height_offset,
+            width_offset : 256 - width_offset,
+        ].mean(),
+        1,
+    )
+    self.assertEqual(label_arr[0:height_offset, :].mean(), 0)
+    self.assertEqual(image_arr[0:height_offset, :, :].mean(), 0)
+    self.assertEqual(label_arr[256 - height_offset :, :].mean(), 0)
+    self.assertEqual(image_arr[256 - height_offset :, :, :].mean(), 0)
+    if width_offset > 0:
+      self.assertEqual(label_arr[height_offset, 0:width_offset].mean(), 0)
+      self.assertEqual(label_arr[height_offset, 256 - width_offset :].mean(), 0)
+      self.assertEqual(image_arr[height_offset, 0:width_offset, :].mean(), 0)
+      self.assertEqual(
+          image_arr[height_offset, 256 - width_offset :, :].mean(), 0
+      )
+

 if __name__ == '__main__':
   tf.test.main()
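As a quick sanity check of the expectations in the new test: with aspect-ratio-preserving resizing of a 1024x2048 input into a 256x256 output and the jitter pinned by `aug_scale_min == aug_scale_max == scale`, the geometry for `scale = 0.5` works out as below (a worked example, not part of the diff):

```python
# Hedged worked example of the test arithmetic above for scale = 0.5.
scale = 0.5
scaled_height = round(1024 * 256 * scale / 2048)     # 64
scaled_width = round(2048 * 256 * scale / 2048)      # 128
height_offset = max((256 - scaled_height) // 2, 0)   # 96
width_offset = max((256 - scaled_width) // 2, 0)     # 64
# With centered_crop=True, the ones-valued label therefore lands in rows 96..159
# and columns 64..191 of the 256x256 output, with zero padding on every side,
# which is exactly what the assertions check.
```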
official/vision/tasks/semantic_segmentation.py
CHANGED
@@ -114,7 +114,8 @@ class SemanticSegmentationTask(base_task.Task):
         preserve_aspect_ratio=params.preserve_aspect_ratio,
         dtype=params.dtype,
         image_feature=params.image_feature,
-        additional_dense_features=params.additional_dense_features
+        additional_dense_features=params.additional_dense_features,
+        centered_crop=params.centered_crop)

     reader = input_reader_factory.input_reader_generator(
         params,
{tf_models_nightly-2.17.0.dev20240313.dist-info → tf_models_nightly-2.17.0.dev20240320.dist-info}/RECORD
CHANGED
@@ -298,7 +298,7 @@ official/nlp/metrics/__init__.py,sha256=7oiypy0N82PDw9aSdcJBLVoGTd_oRSUOdvuJhMv4
 official/nlp/metrics/bleu.py,sha256=XOTTbjC3B9lt8-MLvNX02tjA94wfsUVse6KJ5CWPzfk,6587
 official/nlp/metrics/bleu_test.py,sha256=0j4pZ1MSIcndvUNZa25oXCu4UFOE367KaL7oRNCzLCI,2508
 official/nlp/modeling/__init__.py,sha256=SQozaRl78tYS6xvGCfM3msABe2VL20x_mL2vIln1Sn0,1062
-official/nlp/modeling/layers/__init__.py,sha256=
+official/nlp/modeling/layers/__init__.py,sha256=no0uyA68hsEDa_UuUdhC4jXeqb4lj-3dp6j0GTDzrIQ,4864
 official/nlp/modeling/layers/attention.py,sha256=3-jG3m_L9Y41BY35c4uTFG_Ywlfk4SOwUEtmqfSoKkk,3906
 official/nlp/modeling/layers/attention_test.py,sha256=c7KezuYUze8PWAPuwYow8KTQNRyuuJgwICSsFTyJ2nQ,3536
 official/nlp/modeling/layers/bigbird_attention.py,sha256=dzutgRoQt2DFsYMpMILv_QF0O_FMDbiLQ3T-7c1Zpcs,21111
@@ -965,7 +965,7 @@ official/vision/configs/maskrcnn.py,sha256=yL8kggxXaCTIpSkcozAV2UudO7UqVcEh1_-rM
 official/vision/configs/maskrcnn_test.py,sha256=Wfkbz30h2qxPcpuu6CEpQsf8I_2df6y10-4bRLsWlj8,1733
 official/vision/configs/retinanet.py,sha256=oCKinkh4IyPslmI1pakwi6dVziwjkZ2cIcpSoGRjqnM,17806
 official/vision/configs/retinanet_test.py,sha256=ffS3QufQMLF8FZhKNmi7Yr1RDTnIyZ1XKQ9agr2EyW8,1699
-official/vision/configs/semantic_segmentation.py,sha256=
+official/vision/configs/semantic_segmentation.py,sha256=4ZAyLWKcFYReyrEWBc5b7wld3mMcuH0RcaRe_4J2RrA,30831
 official/vision/configs/semantic_segmentation_test.py,sha256=va-ZG6CtBKcs0NicZe6WmJvHxPxxih7nB0orNtrRiEA,1867
 official/vision/configs/video_classification.py,sha256=tf2XJhD_7c1Ned3eS93Sc0qrQ8U3M_zVJy09KI-9em8,14513
 official/vision/configs/video_classification_test.py,sha256=I1HSamxRQ3-f4-YHIeUChnT5CtHCxFQdiL0zy6RRUXU,1879
@@ -988,7 +988,7 @@ official/vision/dataloaders/input_reader_factory.py,sha256=WpvSA8qyqAo3wkmme4WqX
 official/vision/dataloaders/maskrcnn_input.py,sha256=iCc08yYD-7mvIPojgBjm_nSvoQACXWCIeZNZN8CfXSs,16822
 official/vision/dataloaders/parser.py,sha256=nMXnhigMa_ascSJ2OK88xi4HdE9xvfL3G4oMrHau-t4,2315
 official/vision/dataloaders/retinanet_input.py,sha256=joxJL4hQVPw-FW5iUc7RsxP60N7iYGRuVFpU3gC5flE,18291
-official/vision/dataloaders/segmentation_input.py,sha256=
+official/vision/dataloaders/segmentation_input.py,sha256=Klg5KAChYZDRvqzZfyIzdPy54rTlWYZp2AotolD3WX8,12934
 official/vision/dataloaders/tf_example_decoder.py,sha256=9yCT6uSLMpmw50w7zdaRR_BXy6vIvliLZntrYAgzD18,8647
 official/vision/dataloaders/tf_example_decoder_test.py,sha256=PHxneXHn5-eIMdmk1uI4IPLa178kTCifa4EF53ik2Jo,12629
 official/vision/dataloaders/tf_example_label_map_decoder.py,sha256=EHu6ZQvYxqjUliOlsN_f4okYt9Hdpydv_lM_dQwrklU,2598
@@ -1101,10 +1101,10 @@ official/vision/modeling/layers/roi_generator.py,sha256=ZL-m5IObg8JsXJ_DZ0QQusv2
 official/vision/modeling/layers/roi_sampler.py,sha256=uajmniFsZrmAR_-ojva_dVm9xZU3wze5EqCryP9gUoQ,10006
 official/vision/modeling/models/__init__.py,sha256=9tSOgQ3WTOmg91vIOdiAzpcHCvMJ76nFqCDyQR095k4,1020
 official/vision/ops/__init__.py,sha256=7oiypy0N82PDw9aSdcJBLVoGTd_oRSUOdvuJhMv4leQ,609
-official/vision/ops/anchor.py,sha256=
-official/vision/ops/anchor_generator.py,sha256=
-official/vision/ops/anchor_generator_test.py,sha256=
-official/vision/ops/anchor_test.py,sha256=
+official/vision/ops/anchor.py,sha256=awu5WOXwTK8H5j2MppnRQcvPHNXSCiBnKcJCnISNfnE,20631
+official/vision/ops/anchor_generator.py,sha256=cN2k9rw4EpHPGrtSm8Ea51QERPy6WB1XGwYKjAkP8Uw,7276
+official/vision/ops/anchor_generator_test.py,sha256=qL4LCRr7UzwWlXmWNuS_yVwXe0ygNdMwYOz5T6Mrar4,5302
+official/vision/ops/anchor_test.py,sha256=PP2UNanwQjdiGxGaL9FZjU0vqTUbOp_Ni_1Kedga9aU,9015
 official/vision/ops/augment.py,sha256=20K-OWV_GLbpzv8sY5rBe6x3H5MIRbTlDWOPTfNlGeY,108290
 official/vision/ops/augment_test.py,sha256=9QISeqJ79JgkI2-Hn4pMxPRRin3mbOd7mSJ4v7S8W6E,25881
 official/vision/ops/box_matcher.py,sha256=AvZd7CUUZnT4FwETLyVz3Uxb3gO-o94OwlZrvm7CtX0,9067
@@ -1115,10 +1115,10 @@ official/vision/ops/iou_similarity_test.py,sha256=x5jlcMqMCUYC5cRgdbR0VlAW67AoXo
 official/vision/ops/mask_ops.py,sha256=cZLpIowzEA57bXPDbVXa6mktZVHvGSH-TQ1CxHjpQXw,10270
 official/vision/ops/mask_ops_test.py,sha256=D3xbbbleJd4HkpWOSDSEy6hNihsRBY93BqPF6JP-dJk,2835
 official/vision/ops/nms.py,sha256=bKYDAtyV5j6PG7g-RGF2ZccCI5V1xVvuajNblCy1TGs,8125
-official/vision/ops/preprocess_ops.py,sha256=
+official/vision/ops/preprocess_ops.py,sha256=tDW9apUocwSzGnKkgSrKGwqbcZpb6-WQ8nGnW1_ds20,42569
 official/vision/ops/preprocess_ops_3d.py,sha256=K2583ynA9Zt9pOBNoWSD8KtQR1fwRYBoylJ9NusIBtI,16110
 official/vision/ops/preprocess_ops_3d_test.py,sha256=1MmygNfRBnQYGszsrKBGqP_GrPlP4_UGuddCbkYcIms,8364
-official/vision/ops/preprocess_ops_test.py,sha256=
+official/vision/ops/preprocess_ops_test.py,sha256=FY3EUvQIb82fYqYJPmzkE7pmkhXIQrd7JNLGFPB6SXI,17213
 official/vision/ops/sampling_ops.py,sha256=1jywCA_E4qDUFWsykCLUwZsWtQSR0QREXdJhvP5qCvc,16072
 official/vision/ops/spatial_transform_ops.py,sha256=PVEJGAn0ygtsrid84vD5GgV0jsjyWoNn14RBzreMxM4,38389
 official/vision/ops/target_gather.py,sha256=Ir3X76yXYEVFSYX5h-yfS8SMkY37GYuypBP2C8ykggo,3965
@@ -1150,7 +1150,7 @@ official/vision/tasks/__init__.py,sha256=qfhL5xyDrjZez_zjw613TyciLkqtWm-INFeES7G
 official/vision/tasks/image_classification.py,sha256=Oh4tH-SAD0-MmVfglE3pFXI6-LXVJr2C-WwbudnOdSk,16699
 official/vision/tasks/maskrcnn.py,sha256=iC8-OIFyYcQWpdbBODCXEag2R3YUNdQcZsn_wYAd8f0,25569
 official/vision/tasks/retinanet.py,sha256=EFILc2YPDeLqWcH7QtzN2k5sT5KdKQwioh12NKVOIqg,18261
-official/vision/tasks/semantic_segmentation.py,sha256=
+official/vision/tasks/semantic_segmentation.py,sha256=hQBxyT1qZ1SQd4xHCWETiVNcQs96mYxPEXzdTMY8zTI,14287
 official/vision/tasks/video_classification.py,sha256=F4RnG_OvnayPDeWb8khEp8lFyM6CRWi_FlUaBOgsQjk,14318
 official/vision/utils/__init__.py,sha256=7oiypy0N82PDw9aSdcJBLVoGTd_oRSUOdvuJhMv4leQ,609
 official/vision/utils/ops_test.py,sha256=GqsKndrbfbpsfR6Bhs4gnMelXMVnO66CN5eNna-Wj7Y,4394
@@ -1203,9 +1203,9 @@ tensorflow_models/__init__.py,sha256=etxw45SHxuwFCRX5qGxGMP83II0JfJulzNl5GSNJvhw
 tensorflow_models/tensorflow_models_test.py,sha256=AxUYUdiQn416UR7jg0h6rmv688esvlKDfpyDCIQkF18,1395
 tensorflow_models/nlp/__init__.py,sha256=4tA5Pf4qaFwT-fIFOpX7x7FHJpnyJT-5UgOeFYTyMlc,807
 tensorflow_models/vision/__init__.py,sha256=zBorY_v5xva1uI-qxhZO3Qh-Dii-Suq6wEYh6hKHDfc,833
-tf_models_nightly-2.17.0.
-tf_models_nightly-2.17.0.
-tf_models_nightly-2.17.0.
-tf_models_nightly-2.17.0.
-tf_models_nightly-2.17.0.
-tf_models_nightly-2.17.0.
+tf_models_nightly-2.17.0.dev20240320.dist-info/AUTHORS,sha256=1dG3fXVu9jlo7bul8xuix5F5vOnczMk7_yWn4y70uw0,337
+tf_models_nightly-2.17.0.dev20240320.dist-info/LICENSE,sha256=WxeBS_DejPZQabxtfMOM_xn8qoZNJDQjrT7z2wG1I4U,11512
+tf_models_nightly-2.17.0.dev20240320.dist-info/METADATA,sha256=xPZzhngyKemP0x15q_EQ3oPSZAv8zboLm-n68yaKW6I,1432
+tf_models_nightly-2.17.0.dev20240320.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110
+tf_models_nightly-2.17.0.dev20240320.dist-info/top_level.txt,sha256=gum2FfO5R4cvjl2-QtP-S1aNmsvIZaFFT6VFzU0f4-g,33
+tf_models_nightly-2.17.0.dev20240320.dist-info/RECORD,,
File without changes
File without changes
File without changes
File without changes