tf-models-nightly 2.17.0.dev20240312__py2.py3-none-any.whl → 2.17.0.dev20240314__py2.py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- official/vision/configs/semantic_segmentation.py +4 -0
- official/vision/dataloaders/segmentation_input.py +117 -54
- official/vision/ops/preprocess_ops.py +347 -210
- official/vision/ops/preprocess_ops_test.py +192 -90
- official/vision/serving/image_classification.py +21 -13
- official/vision/tasks/semantic_segmentation.py +2 -1
- {tf_models_nightly-2.17.0.dev20240312.dist-info → tf_models_nightly-2.17.0.dev20240314.dist-info}/METADATA +1 -1
- {tf_models_nightly-2.17.0.dev20240312.dist-info → tf_models_nightly-2.17.0.dev20240314.dist-info}/RECORD +12 -12
- {tf_models_nightly-2.17.0.dev20240312.dist-info → tf_models_nightly-2.17.0.dev20240314.dist-info}/AUTHORS +0 -0
- {tf_models_nightly-2.17.0.dev20240312.dist-info → tf_models_nightly-2.17.0.dev20240314.dist-info}/LICENSE +0 -0
- {tf_models_nightly-2.17.0.dev20240312.dist-info → tf_models_nightly-2.17.0.dev20240314.dist-info}/WHEEL +0 -0
- {tf_models_nightly-2.17.0.dev20240312.dist-info → tf_models_nightly-2.17.0.dev20240314.dist-info}/top_level.txt +0 -0
official/vision/ops/preprocess_ops_test.py

@@ -50,11 +50,13 @@ class InputUtilsTest(parameterized.TestCase, tf.test.TestCase):
     padding_shape = input_shape[:]
     padding_shape[0] = max(output_size - input_shape[0], 0)
     expected_outputs = np.concatenate(
-        [np.ones(clip_shape), np.zeros(padding_shape)], axis=0)
+        [np.ones(clip_shape), np.zeros(padding_shape)], axis=0
+    )

     data = tf.ones(input_shape)
     output_data = preprocess_ops.clip_or_pad_to_fixed_size(
-        data, output_size, constant_values=0)
+        data, output_size, constant_values=0
+    )
     output_data = output_data.numpy()
     self.assertAllClose(output_size, output_data.shape[0])
     self.assertAllClose(expected_outputs, output_data)
@@ -102,60 +104,81 @@ class InputUtilsTest(parameterized.TestCase, tf.test.TestCase):
       (100, 256, 128, 256, 32, 1.0, 1.0, 128, 256),
       (200, 512, 200, 128, 32, 0.25, 0.25, 224, 128),
   )
-  def test_resize_and_crop_image_rectangluar_case(self, input_height,
-                                                  input_width, desired_height,
-                                                  desired_width, stride,
-                                                  scale_y, scale_x,
-                                                  output_height, output_width):
-    image = tf.convert_to_tensor(
-        np.random.rand(input_height, input_width, 3))
+  def test_resize_and_crop_image_rectangluar_case(
+      self,
+      input_height,
+      input_width,
+      desired_height,
+      desired_width,
+      stride,
+      scale_y,
+      scale_x,
+      output_height,
+      output_width,
+  ):
+    image = tf.convert_to_tensor(np.random.rand(input_height, input_width, 3))

     desired_size = (desired_height, desired_width)
     resized_image, image_info = preprocess_ops.resize_and_crop_image(
         image,
         desired_size=desired_size,
-        padded_size=preprocess_ops.compute_padded_size(desired_size, stride))
+        padded_size=preprocess_ops.compute_padded_size(desired_size, stride),
+    )
     resized_image_shape = tf.shape(resized_image)

     self.assertAllEqual(
-        [output_height, output_width, 3],
-        resized_image_shape.numpy())
+        [output_height, output_width, 3], resized_image_shape.numpy()
+    )
     self.assertNDArrayNear(
-        [
-            [input_height, input_width], [desired_height, desired_width],
-            [scale_y, scale_x], [0.0, 0.0]
-        ],
+        [
+            [input_height, input_width],
+            [desired_height, desired_width],
+            [scale_y, scale_x],
+            [0.0, 0.0],
+        ],
         image_info.numpy(),
-        1e-5)
+        1e-5,
+    )

   @parameterized.parameters(
       (100, 200, 220, 220, 32, 1.1, 1.1, 224, 224),
       (512, 512, 1024, 1024, 32, 2.0, 2.0, 1024, 1024),
   )
-  def test_resize_and_crop_image_square_case(self, input_height, input_width,
-                                             desired_height, desired_width,
-                                             stride, scale_y, scale_x,
-                                             output_height, output_width):
-    image = tf.convert_to_tensor(
-        np.random.rand(input_height, input_width, 3))
+  def test_resize_and_crop_image_square_case(
+      self,
+      input_height,
+      input_width,
+      desired_height,
+      desired_width,
+      stride,
+      scale_y,
+      scale_x,
+      output_height,
+      output_width,
+  ):
+    image = tf.convert_to_tensor(np.random.rand(input_height, input_width, 3))

     desired_size = (desired_height, desired_width)
     resized_image, image_info = preprocess_ops.resize_and_crop_image(
         image,
         desired_size=desired_size,
-        padded_size=preprocess_ops.compute_padded_size(desired_size, stride))
+        padded_size=preprocess_ops.compute_padded_size(desired_size, stride),
+    )
     resized_image_shape = tf.shape(resized_image)

     self.assertAllEqual(
-        [output_height, output_width, 3],
-        resized_image_shape.numpy())
+        [output_height, output_width, 3], resized_image_shape.numpy()
+    )
     self.assertNDArrayNear(
-        [
-            [input_height, input_width], [desired_height, desired_width],
-            [scale_y, scale_x], [0.0, 0.0]
-        ],
+        [
+            [input_height, input_width],
+            [desired_height, desired_width],
+            [scale_y, scale_x],
+            [0.0, 0.0],
+        ],
         image_info.numpy(),
-        1e-5)
+        1e-5,
+    )

   @parameterized.parameters((1,), (2,))
   def test_resize_and_crop_image_tensor_desired_size(self, aug_scale_max):
@@ -166,7 +189,8 @@ class InputUtilsTest(parameterized.TestCase, tf.test.TestCase):
         image,
         desired_size=desired_size,
         padded_size=preprocess_ops.compute_padded_size(desired_size, 32),
-        aug_scale_max=aug_scale_max)
+        aug_scale_max=aug_scale_max,
+    )
     resized_image_shape = tf.shape(resized_image)

     self.assertAllEqual([224, 224, 3], resized_image_shape.numpy())
@@ -184,85 +208,108 @@ class InputUtilsTest(parameterized.TestCase, tf.test.TestCase):
       (100, 200, 80, 100, 32, 0.5, 0.5, 50, 100, 96, 128),
       (200, 100, 80, 100, 32, 0.5, 0.5, 100, 50, 128, 96),
   )
-  def test_resize_and_crop_image_v2(self, input_height, input_width,
-                                    short_side, long_side, stride, scale_y,
-                                    scale_x, desired_height, desired_width,
-                                    output_height, output_width):
-    image = tf.convert_to_tensor(
-        np.random.rand(input_height, input_width, 3))
+  def test_resize_and_crop_image_v2(
+      self,
+      input_height,
+      input_width,
+      short_side,
+      long_side,
+      stride,
+      scale_y,
+      scale_x,
+      desired_height,
+      desired_width,
+      output_height,
+      output_width,
+  ):
+    image = tf.convert_to_tensor(np.random.rand(input_height, input_width, 3))
     image_shape = tf.shape(image)[0:2]

     desired_size = tf.where(
         tf.greater(image_shape[0], image_shape[1]),
         tf.constant([long_side, short_side], dtype=tf.int32),
-        tf.constant([short_side, long_side], dtype=tf.int32))
+        tf.constant([short_side, long_side], dtype=tf.int32),
+    )
     resized_image, image_info = preprocess_ops.resize_and_crop_image_v2(
         image,
         short_side=short_side,
         long_side=long_side,
-        padded_size=preprocess_ops.compute_padded_size(desired_size, stride))
+        padded_size=preprocess_ops.compute_padded_size(desired_size, stride),
+    )
     resized_image_shape = tf.shape(resized_image)

     self.assertAllEqual(
-        [output_height, output_width, 3],
-        resized_image_shape.numpy())
+        [output_height, output_width, 3], resized_image_shape.numpy()
+    )
     self.assertNDArrayNear(
-        [
-            [input_height, input_width], [desired_height, desired_width],
-            [scale_y, scale_x], [0.0, 0.0]
-        ],
+        [
+            [input_height, input_width],
+            [desired_height, desired_width],
+            [scale_y, scale_x],
+            [0.0, 0.0],
+        ],
         image_info.numpy(),
-        1e-5)
+        1e-5,
+    )

   @parameterized.parameters(
-      (400, 600), (600, 400),
+      (400, 600),
+      (600, 400),
   )
   def test_center_crop_image(self, input_height, input_width):
-    image = tf.convert_to_tensor(
-        np.random.rand(input_height, input_width, 3))
+    image = tf.convert_to_tensor(np.random.rand(input_height, input_width, 3))
     cropped_image = preprocess_ops.center_crop_image(image)
     cropped_image_shape = tf.shape(cropped_image)
     self.assertAllEqual([350, 350, 3], cropped_image_shape.numpy())

   @parameterized.parameters(
-      (400, 600), (600, 400),
+      (400, 600),
+      (600, 400),
   )
   def test_center_crop_image_v2(self, input_height, input_width):
     image_bytes = tf.constant(
         _encode_image(
             np.uint8(np.random.rand(input_height, input_width, 3) * 255),
-            fmt='JPEG'),
-        dtype=tf.string)
+            fmt='JPEG',
+        ),
+        dtype=tf.string,
+    )
     cropped_image = preprocess_ops.center_crop_image_v2(
-        image_bytes, tf.constant([input_height, input_width, 3], tf.int32))
+        image_bytes, tf.constant([input_height, input_width, 3], tf.int32)
+    )
     cropped_image_shape = tf.shape(cropped_image)
     self.assertAllEqual([350, 350, 3], cropped_image_shape.numpy())

   @parameterized.parameters(
-      (400, 600), (600, 400),
+      (400, 600),
+      (600, 400),
   )
   def test_random_crop_image(self, input_height, input_width):
-    image = tf.convert_to_tensor(
-        np.random.rand(input_height, input_width, 3))
+    image = tf.convert_to_tensor(np.random.rand(input_height, input_width, 3))
     _ = preprocess_ops.random_crop_image(image)

   @parameterized.parameters(
-      (400, 600), (600, 400),
+      (400, 600),
+      (600, 400),
   )
   def test_random_crop_image_v2(self, input_height, input_width):
     image_bytes = tf.constant(
         _encode_image(
             np.uint8(np.random.rand(input_height, input_width, 3) * 255),
-            fmt='JPEG'),
-        dtype=tf.string)
+            fmt='JPEG',
+        ),
+        dtype=tf.string,
+    )
     _ = preprocess_ops.random_crop_image_v2(
-        image_bytes, tf.constant([input_height, input_width, 3], tf.int32))
+        image_bytes, tf.constant([input_height, input_width, 3], tf.int32)
+    )

   @parameterized.parameters((400, 600, 0), (400, 600, 0.4), (600, 400, 1.4))
   def testColorJitter(self, input_height, input_width, color_jitter):
     image = tf.convert_to_tensor(np.random.rand(input_height, input_width, 3))
-    jittered_image = preprocess_ops.color_jitter(
-        image, color_jitter, color_jitter, color_jitter)
+    jittered_image = preprocess_ops.color_jitter(
+        image, color_jitter, color_jitter, color_jitter
+    )
     assert jittered_image.shape == image.shape

   @parameterized.parameters((400, 600, 0), (400, 600, 0.4), (600, 400, 1))
@@ -281,42 +328,51 @@ class InputUtilsTest(parameterized.TestCase, tf.test.TestCase):
     left = np.random.randint(0, high=(input_width - boxes_width))
     right = left + boxes_width
     boxes = tf.constant(
-        np.concatenate([top, left, down, right], axis=-1), tf.float32)
+        np.concatenate([top, left, down, right], axis=-1), tf.float32
+    )
     labels = tf.constant(
-        np.random.randint(low=0, high=num_boxes, size=(num_boxes,)), tf.int64)
+        np.random.randint(low=0, high=num_boxes, size=(num_boxes,)), tf.int64
+    )
     _ = preprocess_ops.random_crop(image, boxes, labels)

   @parameterized.parameters(
       ((640, 640, 3), (1000, 1000), None, (1000, 1000, 3)),
       ((1280, 640, 3), 320, None, (640, 320, 3)),
       ((640, 1280, 3), 320, None, (320, 640, 3)),
-      ((640, 640, 3), 320, 100, (100, 100, 3)))
+      ((640, 640, 3), 320, 100, (100, 100, 3)),
+  )
   def test_resize_image(self, input_shape, size, max_size, expected_shape):
     resized_img, image_info = preprocess_ops.resize_image(
-        tf.zeros((input_shape)), size, max_size)
+        tf.zeros((input_shape)), size, max_size
+    )
     self.assertAllEqual(tf.shape(resized_img), expected_shape)
     self.assertAllEqual(image_info[0], input_shape[:-1])
     self.assertAllEqual(image_info[1], expected_shape[:-1])
     self.assertAllEqual(
         image_info[2],
-        np.array(expected_shape[:-1]) / np.array(input_shape[:-1]))
+        np.array(expected_shape[:-1]) / np.array(input_shape[:-1]),
+    )
     self.assertAllEqual(image_info[3], [0, 0])

   def test_resize_and_crop_masks(self):
     # shape: (2, 1, 4, 3)
-    masks = tf.constant([
-        [
-            [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]],
-        ],
-        [
-            [[12, 13, 14], [15, 16, 17], [18, 19, 20], [21, 22, 23]],
-        ],
-    ])
+    masks = tf.constant([
+        [[
+            [0, 1, 2],
+            [3, 4, 5],
+            [6, 7, 8],
+            [9, 10, 11],
+        ]],
+        [[
+            [12, 13, 14],
+            [15, 16, 17],
+            [18, 19, 20],
+            [21, 22, 23],
+        ]],
+    ])
     output = preprocess_ops.resize_and_crop_masks(
-        masks, image_scale=[2.0, 0.5], output_size=[2, 3], offset=[1, 0])
+        masks, image_scale=[2.0, 0.5], output_size=[2, 3], offset=[1, 0]
+    )
     # shape: (2, 2, 3, 3)
     expected_output = tf.constant([
         [
@@ -351,24 +407,30 @@ class InputUtilsTest(parameterized.TestCase, tf.test.TestCase):
       (512, 512, 1.0, 1024, 1024, 1024, 1024),
   )
   def test_deit3_resize_center_crop(
-      self,
-      input_height, input_width, center_crop_fraction,
-      desired_height, desired_width, output_height, output_width):
+      self,
+      input_height,
+      input_width,
+      center_crop_fraction,
+      desired_height,
+      desired_width,
+      output_height,
+      output_width,
+  ):
     # Make sure that with center_crop_ratio = 1; result has desired resolution.
-    image = tf.convert_to_tensor(
-        np.random.rand(input_height, input_width, 3))
+    image = tf.convert_to_tensor(np.random.rand(input_height, input_width, 3))

     desired_size = (desired_height, desired_width)
     center_cropped = preprocess_ops.center_crop_image(
-        image,
-        center_crop_fraction=center_crop_fraction)
+        image, center_crop_fraction=center_crop_fraction
+    )
     resized_image = tf.image.resize(
-        center_cropped, desired_size, method=tf.image.ResizeMethod.BICUBIC)
+        center_cropped, desired_size, method=tf.image.ResizeMethod.BICUBIC
+    )
     resized_image_shape = tf.shape(resized_image)

     self.assertAllEqual(
-        [output_height, output_width, 3],
-        resized_image_shape.numpy())
+        [output_height, output_width, 3], resized_image_shape.numpy()
+    )

   @parameterized.product(
       prenormalize=[True, False],
@@ -420,6 +482,46 @@ class InputUtilsTest(parameterized.TestCase, tf.test.TestCase):
     self.assertShapeEqual(input_image, aug_image)
     self.assertDTypeEqual(aug_image, np.uint8)

+  @parameterized.parameters(0.25, 0.5, 0.75, 1, 1.25, 1.5)
+  def test_resize_and_crop_image_and_masks(self, scale):
+    image = tf.convert_to_tensor(np.random.rand(1024, 2048, 3))
+    label = tf.convert_to_tensor(np.ones((1, 1024, 2048, 1), dtype=np.int32))
+    image, image_info = preprocess_ops.resize_and_crop_image(
+        image, (256, 256), (256, 256), scale, scale, centered_crop=True
+    )
+    image_scale = image_info[2, :]
+    offset = image_info[3, :]
+    label = preprocess_ops.resize_and_crop_masks(
+        label, image_scale, (256, 256), offset, centered_crop=True
+    )
+    self.assertEqual(image.shape[0:2], label.shape[1:3])
+    image_arr = image.numpy()
+    label_arr = np.squeeze(label.numpy())
+
+    scaled_height = round(1024 * 256 * scale / 2048)
+    scaled_width = round(2048 * 256 * scale / 2048)
+    height_offset = max((256 - scaled_height) // 2, 0)
+    width_offset = max((256 - scaled_width) // 2, 0)
+
+    self.assertEqual(
+        label_arr[
+            height_offset : 256 - height_offset,
+            width_offset : 256 - width_offset,
+        ].mean(),
+        1,
+    )
+    self.assertEqual(label_arr[0:height_offset, :].mean(), 0)
+    self.assertEqual(image_arr[0:height_offset, :, :].mean(), 0)
+    self.assertEqual(label_arr[256 - height_offset :, :].mean(), 0)
+    self.assertEqual(image_arr[256 - height_offset :, :, :].mean(), 0)
+    if width_offset > 0:
+      self.assertEqual(label_arr[height_offset, 0:width_offset].mean(), 0)
+      self.assertEqual(label_arr[height_offset, 256 - width_offset :].mean(), 0)
+      self.assertEqual(image_arr[height_offset, 0:width_offset, :].mean(), 0)
+      self.assertEqual(
+          image_arr[height_offset, 256 - width_offset :, :].mean(), 0
+      )
+

 if __name__ == '__main__':
   tf.test.main()
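The new `test_resize_and_crop_image_and_masks` case above exercises the `centered_crop` option this release introduces on `preprocess_ops.resize_and_crop_image` and `preprocess_ops.resize_and_crop_masks`. A minimal usage sketch distilled from that test (the positional arguments mirror the test; reading the two middle scalars as `aug_scale_min`/`aug_scale_max` is an assumption about the signature):

```python
import numpy as np
import tensorflow as tf

from official.vision.ops import preprocess_ops

image = tf.convert_to_tensor(np.random.rand(1024, 2048, 3))
masks = tf.ones((1, 1024, 2048, 1), dtype=tf.int32)

# Resize into a 256x256 padded canvas. With centered_crop=True the scaled
# image is placed in the center of the canvas (zero borders on all sides)
# instead of being anchored at the top-left corner.
image, image_info = preprocess_ops.resize_and_crop_image(
    image, (256, 256), (256, 256), 0.5, 0.5, centered_crop=True
)

# image_info rows: original size, desired size, scale, offset.
image_scale = image_info[2, :]
offset = image_info[3, :]

# Apply the same scale and offset to the masks so the labels stay
# pixel-aligned with the image.
masks = preprocess_ops.resize_and_crop_masks(
    masks, image_scale, (256, 256), offset, centered_crop=True
)
```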
official/vision/serving/image_classification.py

@@ -33,12 +33,7 @@ class ClassificationModule(export_base.ExportModule):
         model_config=self.params.task.model,
         l2_regularizer=None)

-  def _build_inputs(self, image):
-    """Builds classification model inputs for serving."""
-    # Center crops and resizes image.
-    if isinstance(image, tf.RaggedTensor):
-      image = image.to_tensor()
-    image = tf.cast(image, dtype=tf.float32)
+  def _crop_and_resize(self, image):
     if self.params.task.train_data.aug_crop:
       image = preprocess_ops.center_crop_image(image)

@@ -48,6 +43,21 @@ class ClassificationModule(export_base.ExportModule):
     image = tf.reshape(
         image, [self._input_image_size[0], self._input_image_size[1], 3])

+    return image
+
+  def _build_inputs(self, image):
+    """Builds classification model inputs for serving."""
+    # Center crops and resizes image.
+    if isinstance(image, tf.RaggedTensor):
+      image = image.to_tensor()
+    image = tf.cast(image, dtype=tf.float32)
+
+    # For these input types, decode_image already performs cropping.
+    if not (
+        self._input_type in ['tf_example', 'image_bytes']
+        and len(self._input_image_size) == 2):
+      image = self._crop_and_resize(image)
+
     # Normalizes image with mean and std pixel values.
     image = preprocess_ops.normalize_image(
         image, offset=preprocess_ops.MEAN_RGB, scale=preprocess_ops.STDDEV_RGB)
@@ -72,14 +82,12 @@ class ClassificationModule(export_base.ExportModule):
           encoded_image_bytes, channels=self._num_channels
       )
       image_tensor.set_shape((None, None, self._num_channels))
-      #
-      #
-      image_tensor = tf.image.resize(
-          image_tensor,
-          self._input_image_size,
-          method=tf.image.ResizeMethod.BILINEAR,
-      )
+      # Crop the image inside the same loop as decoding an image
+      # if there could be several images of different sizes in the batch.
+      image_tensor = tf.cast(image_tensor, dtype=tf.float32)
+      image_tensor = self._crop_and_resize(image_tensor)
       image_tensor = tf.cast(image_tensor, tf.uint8)
+      return image_tensor
     else:
       # Convert raw bytes into a tensor and reshape it, if not 2D input.
       image_tensor = tf.io.decode_raw(encoded_image_bytes, out_type=tf.uint8)
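Taken together, the three image_classification.py hunks move the center-crop/resize logic into a reusable `_crop_and_resize` helper: `_build_inputs` now skips it for `tf_example`/`image_bytes` inputs, and the bytes-decoding branch applies it per decoded image in place of the old fixed `tf.image.resize`. A sketch of the resulting per-image path (the body lines come from the hunk above; the wrapper name `_decode_one` and the `tf.io.decode_image` call are assumptions about the surrounding, unshown code):

```python
def _decode_one(self, encoded_image_bytes):
  # Hypothetical wrapper: the diff only shows the per-image body.
  image_tensor = tf.io.decode_image(
      encoded_image_bytes, channels=self._num_channels
  )
  image_tensor.set_shape((None, None, self._num_channels))
  # Crop/resize inside the same loop as decoding, since a batch may
  # contain images of several different sizes.
  image_tensor = tf.cast(image_tensor, dtype=tf.float32)
  image_tensor = self._crop_and_resize(image_tensor)
  image_tensor = tf.cast(image_tensor, tf.uint8)
  return image_tensor
```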
official/vision/tasks/semantic_segmentation.py

@@ -114,7 +114,8 @@ class SemanticSegmentationTask(base_task.Task):
         preserve_aspect_ratio=params.preserve_aspect_ratio,
         dtype=params.dtype,
         image_feature=params.image_feature,
-        additional_dense_features=params.additional_dense_features)
+        additional_dense_features=params.additional_dense_features,
+        centered_crop=params.centered_crop)

     reader = input_reader_factory.input_reader_generator(
         params,
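This one-line task change is the consumer side of the new option: the segmentation parser now receives `centered_crop` from the task config, and the `official/vision/configs/semantic_segmentation.py` change (+4 lines, not expanded in this view) presumably adds the matching field. A hedged sketch of that field (name and wiring taken from the hunk above; the dataclass placement, docstring, and `False` default are assumptions, the default chosen so existing configs keep the old top-left padding behavior):

```python
import dataclasses

from official.core import config_definitions as cfg


@dataclasses.dataclass
class DataConfig(cfg.DataConfig):
  """Input config for semantic segmentation (abridged sketch)."""

  # ... existing fields (output_size, preserve_aspect_ratio, ...) elided ...

  # Assumed shape of the new knob: when True, resize_and_crop_image and
  # resize_and_crop_masks pad the resized content symmetrically about the
  # center of the output canvas instead of the top-left corner.
  centered_crop: bool = False
```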
tf_models_nightly-2.17.0.dev20240314.dist-info/RECORD

@@ -965,7 +965,7 @@ official/vision/configs/maskrcnn.py,sha256=yL8kggxXaCTIpSkcozAV2UudO7UqVcEh1_-rM
 official/vision/configs/maskrcnn_test.py,sha256=Wfkbz30h2qxPcpuu6CEpQsf8I_2df6y10-4bRLsWlj8,1733
 official/vision/configs/retinanet.py,sha256=oCKinkh4IyPslmI1pakwi6dVziwjkZ2cIcpSoGRjqnM,17806
 official/vision/configs/retinanet_test.py,sha256=ffS3QufQMLF8FZhKNmi7Yr1RDTnIyZ1XKQ9agr2EyW8,1699
-official/vision/configs/semantic_segmentation.py,sha256=
+official/vision/configs/semantic_segmentation.py,sha256=4ZAyLWKcFYReyrEWBc5b7wld3mMcuH0RcaRe_4J2RrA,30831
 official/vision/configs/semantic_segmentation_test.py,sha256=va-ZG6CtBKcs0NicZe6WmJvHxPxxih7nB0orNtrRiEA,1867
 official/vision/configs/video_classification.py,sha256=tf2XJhD_7c1Ned3eS93Sc0qrQ8U3M_zVJy09KI-9em8,14513
 official/vision/configs/video_classification_test.py,sha256=I1HSamxRQ3-f4-YHIeUChnT5CtHCxFQdiL0zy6RRUXU,1879
@@ -988,7 +988,7 @@ official/vision/dataloaders/input_reader_factory.py,sha256=WpvSA8qyqAo3wkmme4WqX
 official/vision/dataloaders/maskrcnn_input.py,sha256=iCc08yYD-7mvIPojgBjm_nSvoQACXWCIeZNZN8CfXSs,16822
 official/vision/dataloaders/parser.py,sha256=nMXnhigMa_ascSJ2OK88xi4HdE9xvfL3G4oMrHau-t4,2315
 official/vision/dataloaders/retinanet_input.py,sha256=joxJL4hQVPw-FW5iUc7RsxP60N7iYGRuVFpU3gC5flE,18291
-official/vision/dataloaders/segmentation_input.py,sha256=
+official/vision/dataloaders/segmentation_input.py,sha256=Klg5KAChYZDRvqzZfyIzdPy54rTlWYZp2AotolD3WX8,12934
 official/vision/dataloaders/tf_example_decoder.py,sha256=9yCT6uSLMpmw50w7zdaRR_BXy6vIvliLZntrYAgzD18,8647
 official/vision/dataloaders/tf_example_decoder_test.py,sha256=PHxneXHn5-eIMdmk1uI4IPLa178kTCifa4EF53ik2Jo,12629
 official/vision/dataloaders/tf_example_label_map_decoder.py,sha256=EHu6ZQvYxqjUliOlsN_f4okYt9Hdpydv_lM_dQwrklU,2598
@@ -1115,10 +1115,10 @@ official/vision/ops/iou_similarity_test.py,sha256=x5jlcMqMCUYC5cRgdbR0VlAW67AoXo
 official/vision/ops/mask_ops.py,sha256=cZLpIowzEA57bXPDbVXa6mktZVHvGSH-TQ1CxHjpQXw,10270
 official/vision/ops/mask_ops_test.py,sha256=D3xbbbleJd4HkpWOSDSEy6hNihsRBY93BqPF6JP-dJk,2835
 official/vision/ops/nms.py,sha256=bKYDAtyV5j6PG7g-RGF2ZccCI5V1xVvuajNblCy1TGs,8125
-official/vision/ops/preprocess_ops.py,sha256=
+official/vision/ops/preprocess_ops.py,sha256=tDW9apUocwSzGnKkgSrKGwqbcZpb6-WQ8nGnW1_ds20,42569
 official/vision/ops/preprocess_ops_3d.py,sha256=K2583ynA9Zt9pOBNoWSD8KtQR1fwRYBoylJ9NusIBtI,16110
 official/vision/ops/preprocess_ops_3d_test.py,sha256=1MmygNfRBnQYGszsrKBGqP_GrPlP4_UGuddCbkYcIms,8364
-official/vision/ops/preprocess_ops_test.py,sha256=
+official/vision/ops/preprocess_ops_test.py,sha256=FY3EUvQIb82fYqYJPmzkE7pmkhXIQrd7JNLGFPB6SXI,17213
 official/vision/ops/sampling_ops.py,sha256=1jywCA_E4qDUFWsykCLUwZsWtQSR0QREXdJhvP5qCvc,16072
 official/vision/ops/spatial_transform_ops.py,sha256=PVEJGAn0ygtsrid84vD5GgV0jsjyWoNn14RBzreMxM4,38389
 official/vision/ops/target_gather.py,sha256=Ir3X76yXYEVFSYX5h-yfS8SMkY37GYuypBP2C8ykggo,3965
@@ -1140,7 +1140,7 @@ official/vision/serving/export_tfhub_lib.py,sha256=BZC4XSbly0DLDSlWnphjWaD0Q6SOt
 official/vision/serving/export_tflite.py,sha256=OhIVHrcDvB83p4WroheWcTmeYArMV9TJcgUVJWytWIc,5115
 official/vision/serving/export_tflite_lib.py,sha256=2AWkyEsLvMBE19m2WAa0IpyFsHV8sIR4Gvcv5ZQWbmg,7272
 official/vision/serving/export_utils.py,sha256=8mJb1MF_6kk3lbqZOZq2Lwu4A2L1KWxjnWnV_ZpYlVI,4881
-official/vision/serving/image_classification.py,sha256=
+official/vision/serving/image_classification.py,sha256=wEthg6y-geVsRkAuQ1SKv-fnECMFXYuE1qR1H0yCVBA,4562
 official/vision/serving/image_classification_test.py,sha256=qZmuiQewptSQdY2iQEkx8rHjULybgADuXsQ84SjLCok,6759
 official/vision/serving/semantic_segmentation.py,sha256=1RmEOpYz-HjB-VKa3k9p3gZj3h_3ob5d-3RO9_cXfH0,4160
 official/vision/serving/semantic_segmentation_test.py,sha256=QVx12ciMA6T20d3wuKu8pe6ekEyPj8aNbKbRKxOgRik,5638
@@ -1150,7 +1150,7 @@ official/vision/tasks/__init__.py,sha256=qfhL5xyDrjZez_zjw613TyciLkqtWm-INFeES7G
 official/vision/tasks/image_classification.py,sha256=Oh4tH-SAD0-MmVfglE3pFXI6-LXVJr2C-WwbudnOdSk,16699
 official/vision/tasks/maskrcnn.py,sha256=iC8-OIFyYcQWpdbBODCXEag2R3YUNdQcZsn_wYAd8f0,25569
 official/vision/tasks/retinanet.py,sha256=EFILc2YPDeLqWcH7QtzN2k5sT5KdKQwioh12NKVOIqg,18261
-official/vision/tasks/semantic_segmentation.py,sha256=
+official/vision/tasks/semantic_segmentation.py,sha256=hQBxyT1qZ1SQd4xHCWETiVNcQs96mYxPEXzdTMY8zTI,14287
 official/vision/tasks/video_classification.py,sha256=F4RnG_OvnayPDeWb8khEp8lFyM6CRWi_FlUaBOgsQjk,14318
 official/vision/utils/__init__.py,sha256=7oiypy0N82PDw9aSdcJBLVoGTd_oRSUOdvuJhMv4leQ,609
 official/vision/utils/ops_test.py,sha256=GqsKndrbfbpsfR6Bhs4gnMelXMVnO66CN5eNna-Wj7Y,4394
@@ -1203,9 +1203,9 @@ tensorflow_models/__init__.py,sha256=etxw45SHxuwFCRX5qGxGMP83II0JfJulzNl5GSNJvhw
 tensorflow_models/tensorflow_models_test.py,sha256=AxUYUdiQn416UR7jg0h6rmv688esvlKDfpyDCIQkF18,1395
 tensorflow_models/nlp/__init__.py,sha256=4tA5Pf4qaFwT-fIFOpX7x7FHJpnyJT-5UgOeFYTyMlc,807
 tensorflow_models/vision/__init__.py,sha256=zBorY_v5xva1uI-qxhZO3Qh-Dii-Suq6wEYh6hKHDfc,833
-tf_models_nightly-2.17.0.dev20240312.dist-info/AUTHORS,sha256=1dG3fXVu9jlo7bul8xuix5F5vOnczMk7_yWn4y70uw0,337
-tf_models_nightly-2.17.0.dev20240312.dist-info/LICENSE,sha256=WxeBS_DejPZQabxtfMOM_xn8qoZNJDQjrT7z2wG1I4U,11512
-tf_models_nightly-2.17.0.dev20240312.dist-info/METADATA,sha256=
-tf_models_nightly-2.17.0.dev20240312.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110
-tf_models_nightly-2.17.0.dev20240312.dist-info/top_level.txt,sha256=gum2FfO5R4cvjl2-QtP-S1aNmsvIZaFFT6VFzU0f4-g,33
-tf_models_nightly-2.17.0.dev20240312.dist-info/RECORD,,
+tf_models_nightly-2.17.0.dev20240314.dist-info/AUTHORS,sha256=1dG3fXVu9jlo7bul8xuix5F5vOnczMk7_yWn4y70uw0,337
+tf_models_nightly-2.17.0.dev20240314.dist-info/LICENSE,sha256=WxeBS_DejPZQabxtfMOM_xn8qoZNJDQjrT7z2wG1I4U,11512
+tf_models_nightly-2.17.0.dev20240314.dist-info/METADATA,sha256=rG9SC_Q5R5o4KDAZzsZhuLMumj4osPggvZYK-8MIPUM,1432
+tf_models_nightly-2.17.0.dev20240314.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110
+tf_models_nightly-2.17.0.dev20240314.dist-info/top_level.txt,sha256=gum2FfO5R4cvjl2-QtP-S1aNmsvIZaFFT6VFzU0f4-g,33
+tf_models_nightly-2.17.0.dev20240314.dist-info/RECORD,,