tf-models-nightly 2.17.0.dev20240422-py2.py3-none-any.whl → 2.17.0.dev20240423-py2.py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- official/recommendation/uplift/metrics/poisson_metrics.py +3 -0
- official/vision/configs/video_classification.py +1 -0
- official/vision/dataloaders/video_input.py +11 -1
- official/vision/ops/preprocess_ops_3d.py +27 -3
- official/vision/ops/preprocess_ops_3d_test.py +19 -1
- {tf_models_nightly-2.17.0.dev20240422.dist-info → tf_models_nightly-2.17.0.dev20240423.dist-info}/METADATA +1 -1
- {tf_models_nightly-2.17.0.dev20240422.dist-info → tf_models_nightly-2.17.0.dev20240423.dist-info}/RECORD +11 -11
- {tf_models_nightly-2.17.0.dev20240422.dist-info → tf_models_nightly-2.17.0.dev20240423.dist-info}/AUTHORS +0 -0
- {tf_models_nightly-2.17.0.dev20240422.dist-info → tf_models_nightly-2.17.0.dev20240423.dist-info}/LICENSE +0 -0
- {tf_models_nightly-2.17.0.dev20240422.dist-info → tf_models_nightly-2.17.0.dev20240423.dist-info}/WHEEL +0 -0
- {tf_models_nightly-2.17.0.dev20240422.dist-info → tf_models_nightly-2.17.0.dev20240423.dist-info}/top_level.txt +0 -0
official/recommendation/uplift/metrics/poisson_metrics.py

@@ -130,6 +130,7 @@ def _safe_x_minus_xlogx(x: tf.Tensor) -> tf.Tensor:
   return tf.where(tf.equal(x, 0.0), tf.zeros_like(x), values)
 
 
+@tf_keras.utils.register_keras_serializable(package="Uplift")
 class LogLossMeanBaseline(tf_keras.metrics.Metric):
   """Computes the (weighted) poisson log loss for a mean predictor."""
 
@@ -200,6 +201,7 @@ class LogLossMeanBaseline(tf_keras.metrics.Metric):
     return cls(**config)
 
 
+@tf_keras.utils.register_keras_serializable(package="Uplift")
 class LogLossMinimum(tf_keras.metrics.Metric):
   """Computes the minimum achievable (weighted) poisson log loss.
 
@@ -278,6 +280,7 @@ class LogLossMinimum(tf_keras.metrics.Metric):
     return cls(**config)
 
 
+@tf_keras.utils.register_keras_serializable(package="Uplift")
 class PseudoRSquared(tf_keras.metrics.Metric):
   """Computes the pseudo R-squared metric for poisson regression.
 
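The three decorators above register the poisson metrics under the "Uplift" package, which is what lets Keras rebuild them from a serialized config. A minimal sketch of that round trip, assuming LogLossMeanBaseline can be constructed with its default arguments:

import tf_keras
from official.recommendation.uplift.metrics import poisson_metrics

metric = poisson_metrics.LogLossMeanBaseline()  # default constructor args assumed
config = tf_keras.metrics.serialize(metric)
restored = tf_keras.metrics.deserialize(config)  # resolvable only because the class is registered
assert isinstance(restored, poisson_metrics.LogLossMeanBaseline)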
official/vision/configs/video_classification.py

@@ -59,6 +59,7 @@ class DataConfig(cfg.DataConfig):
   aug_max_aspect_ratio: float = 2.0
   aug_min_area_ratio: float = 0.49
   aug_max_area_ratio: float = 1.0
+  aug_random_rotation: bool = False
   aug_type: Optional[
       common.Augmentation] = None  # AutoAugment and RandAugment.
   mixup_and_cutmix: Optional[common.MixupAndCutmix] = None
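The new aug_random_rotation field defaults to False, so existing configs keep their behavior. A hedged sketch of switching it on through an experiment config; the experiment name below is only an example of a registered video classification experiment:

from official.core import exp_factory

exp_config = exp_factory.get_exp_config('video_classification_kinetics600')  # example experiment name
exp_config.task.train_data.aug_random_rotation = True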
official/vision/dataloaders/video_input.py

@@ -44,6 +44,7 @@ def process_image(image: tf.Tensor,
                   max_aspect_ratio: float = 2,
                   min_area_ratio: float = 0.49,
                   max_area_ratio: float = 1.0,
+                  random_rotation: bool = False,
                   augmenter: Optional[augment.ImageAugment] = None,
                   seed: Optional[int] = None,
                   input_image_format: Optional[str] = 'jpeg') -> tf.Tensor:
@@ -77,10 +78,11 @@ def process_image(image: tf.Tensor,
     max_aspect_ratio: The maximum aspect range for cropping.
     min_area_ratio: The minimum area range for cropping.
     max_area_ratio: The maximum area range for cropping.
+    random_rotation: Use uniform random rotation augmentation or not.
     augmenter: Image augmenter to distort each image.
     seed: A deterministic seed to use when sampling.
     input_image_format: The format of input image which could be jpeg, png or
-
+      none for unknown or mixed datasets.
 
   Returns:
     Processed frames. Tensor of shape
@@ -136,6 +138,8 @@ def process_image(image: tf.Tensor,
         (min_aspect_ratio, max_aspect_ratio),
         (min_area_ratio, max_area_ratio))
     image = preprocess_ops_3d.random_flip_left_right(image, seed)
+    if random_rotation:
+      image = preprocess_ops_3d.random_rotation(image, seed)
 
   if augmenter is not None:
     image = augmenter.distort(image)
@@ -303,6 +307,7 @@ class Parser(parser.Parser):
     self._min_area_ratio = input_params.aug_min_area_ratio
     self._max_area_ratio = input_params.aug_max_area_ratio
     self._input_image_format = input_params.input_image_format
+    self._random_rotation = input_params.aug_random_rotation
     if self._output_audio:
       self._audio_feature = input_params.audio_feature
       self._audio_shape = input_params.audio_feature_shape
@@ -329,6 +334,10 @@ class Parser(parser.Parser):
           'Augmentation policy {} not supported.'.format(aug_type.type))
     else:
       self._augmenter = None
+      if self._random_rotation:
+        logging.info('Using standard augmentation with rotation.')
+      else:
+        logging.info('Using standard augmentation without rotation.')
 
   def _parse_train_data(
       self, decoded_tensors: Dict[str, tf.Tensor]
@@ -350,6 +359,7 @@ class Parser(parser.Parser):
         max_aspect_ratio=self._max_aspect_ratio,
         min_area_ratio=self._min_area_ratio,
         max_area_ratio=self._max_area_ratio,
+        random_rotation=self._random_rotation,
         augmenter=self._augmenter,
         zero_centering_image=self._zero_centering_image,
         input_image_format=self._input_image_format)
official/vision/ops/preprocess_ops_3d.py

@@ -404,9 +404,33 @@ def random_flip_left_right(frames: tf.Tensor,
   return frames
 
 
-def normalize_image(frames: tf.Tensor,
-                    zero_centering_image: bool,
-                    dtype: tf.dtypes.DType = tf.float32) -> tf.Tensor:
+def random_rotation(frames: tf.Tensor, seed: Optional[int] = None) -> tf.Tensor:
+  """Randomly rotate all frames with 0, 90, 180, or 270 degrees.
+
+  Args:
+    frames: A Tensor of shape [timesteps, input_h, input_w, channels].
+    seed: A seed to use for the random sampling.
+
+  Returns:
+    A Tensor of shape [timesteps, output_h, output_w, channels] eventually
+      rotated at 0/90/180/270 degrees.
+  """
+  rotation_times = tf.random.uniform(
+      (), minval=0, maxval=4, dtype=tf.int32, seed=seed
+  )
+  frames = tf.cond(
+      tf.greater(rotation_times, 0),
+      true_fn=lambda: tf.image.rot90(frames, k=rotation_times),
+      false_fn=lambda: frames,
+  )
+  return frames
+
+
+def normalize_image(
+    frames: tf.Tensor,
+    zero_centering_image: bool,
+    dtype: tf.dtypes.DType = tf.float32,
+) -> tf.Tensor:
   """Normalizes images.
 
   Args:
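Because tf.image.rot90 with an odd k swaps the two spatial dimensions, the output height and width are exchanged on roughly half of the draws. A small sketch of calling the new op directly; the clip shape is only an example:

import tensorflow as tf
from official.vision.ops import preprocess_ops_3d

frames = tf.zeros([6, 90, 120, 3], dtype=tf.float32)  # 6 frames of 90x120 RGB
rotated = preprocess_ops_3d.random_rotation(frames, seed=0)
# rotated has shape [6, 90, 120, 3] when k is 0 or 2, and [6, 120, 90, 3] when k is 1 or 3.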
official/vision/ops/preprocess_ops_3d_test.py

@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-
 import io
 import itertools
 import numpy as np
@@ -181,6 +180,25 @@ class ParserUtilsTest(tf.test.TestCase):
     self.assertTrue((flipped_frames == self._np_frames).numpy().all() or (
         flipped_frames == flipped).numpy().all())
 
+  def test_random_rotation(self):
+    rotated_frames = preprocess_ops_3d.random_rotation(self._frames)
+
+    rotated_once = np.rot90(self._np_frames[0, :, :, 0], 1)
+    rotated_twice = np.rot90(self._np_frames[0, :, :, 0], 2)
+    rotated_thrice = np.rot90(self._np_frames[0, :, :, 0], 3)
+    rotated_once = rotated_once[np.newaxis, :, :, np.newaxis]
+    rotated_twice = rotated_twice[np.newaxis, :, :, np.newaxis]
+    rotated_thrice = rotated_thrice[np.newaxis, :, :, np.newaxis]
+    rotated_once = np.broadcast_to(rotated_once, (6, 120, 90, 3))
+    rotated_twice = np.broadcast_to(rotated_twice, (6, 90, 120, 3))
+    rotated_thrice = np.broadcast_to(rotated_thrice, (6, 120, 90, 3))
+    self.assertTrue(
+        (rotated_frames == self._np_frames).numpy().all()
+        or (rotated_frames == rotated_once).numpy().all()
+        or (rotated_frames == rotated_twice).numpy().all()
+        or (rotated_frames == rotated_thrice).numpy().all()
+    )
+
   def test_normalize_image(self):
     normalized_images_1 = preprocess_ops_3d.normalize_image(
         self._frames, False, tf.float32)
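The test accepts any of four outcomes because the rotation count is sampled inside the op, and the alternating broadcast target shapes reflect that odd counts transpose the spatial dimensions. A standalone check of that shape behavior on a 90x120 frame, matching the shapes asserted above:

import numpy as np

frame = np.zeros((90, 120))
assert np.rot90(frame, 1).shape == (120, 90)  # odd counts swap height and width
assert np.rot90(frame, 2).shape == (90, 120)  # even counts preserve them
assert np.rot90(frame, 3).shape == (120, 90)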
{tf_models_nightly-2.17.0.dev20240422.dist-info → tf_models_nightly-2.17.0.dev20240423.dist-info}/RECORD

@@ -915,7 +915,7 @@ official/recommendation/uplift/metrics/label_variance_test.py,sha256=k0mdEU1WU53
 official/recommendation/uplift/metrics/loss_metric.py,sha256=gYZdnTsuL_2q1FZuPip-DaWxt_Q-02YYaePyMBVNx7w,7344
 official/recommendation/uplift/metrics/loss_metric_test.py,sha256=48rQG8bKFdy0xBFjoOLXKRUlYpCEyAzSmPOFoF7FX94,16021
 official/recommendation/uplift/metrics/metric_configs.py,sha256=Z-r79orE4EycQ5TJ7xdI5LhjOHT3wzChYyDxcxGqLXk,1670
-official/recommendation/uplift/metrics/poisson_metrics.py,sha256=
+official/recommendation/uplift/metrics/poisson_metrics.py,sha256=YV-5RgwOBihRoTgmbEYmnKKLPMxRhQjLGJ4jmDsGUVo,14661
 official/recommendation/uplift/metrics/poisson_metrics_test.py,sha256=zGo7Y7XJY4kvecoeAy7Jci_N3YZKMTKpj28j7ZtoTLc,16378
 official/recommendation/uplift/metrics/sliced_metric.py,sha256=uhvzudOWtMNKZ0avwGhX-37UELR9Cq9b4C0g8erBkXw,8688
 official/recommendation/uplift/metrics/sliced_metric_test.py,sha256=bhVGyI1tOkFkVOtruJo3p6XopDFyG1JW5qdZm9-RqeU,12248
@@ -969,7 +969,7 @@ official/vision/configs/retinanet.py,sha256=oCKinkh4IyPslmI1pakwi6dVziwjkZ2cIcpS
 official/vision/configs/retinanet_test.py,sha256=ffS3QufQMLF8FZhKNmi7Yr1RDTnIyZ1XKQ9agr2EyW8,1699
 official/vision/configs/semantic_segmentation.py,sha256=4ZAyLWKcFYReyrEWBc5b7wld3mMcuH0RcaRe_4J2RrA,30831
 official/vision/configs/semantic_segmentation_test.py,sha256=va-ZG6CtBKcs0NicZe6WmJvHxPxxih7nB0orNtrRiEA,1867
-official/vision/configs/video_classification.py,sha256=
+official/vision/configs/video_classification.py,sha256=DDiBq9tCfk3x-FNprWvWc5hLaVAjGLTWgGvXCpElbw4,14549
 official/vision/configs/video_classification_test.py,sha256=I1HSamxRQ3-f4-YHIeUChnT5CtHCxFQdiL0zy6RRUXU,1879
 official/vision/data/__init__.py,sha256=7oiypy0N82PDw9aSdcJBLVoGTd_oRSUOdvuJhMv4leQ,609
 official/vision/data/create_coco_tf_record.py,sha256=UOvAPYONUAkoeWMtE18fVa6qfqzu_kulrwDkEpUXcjM,22742
@@ -1003,7 +1003,7 @@ official/vision/dataloaders/tfds_segmentation_decoders.py,sha256=-T8hrcEjaUivvtQ
 official/vision/dataloaders/tfexample_utils.py,sha256=qYUaT3V2zcEybJooImTxRT6MbCYzKAc_6iZPzIgBiyw,12698
 official/vision/dataloaders/utils.py,sha256=0E20HOYq-NeK479bmVmXQakD8M9GQn_lyWeyVXRdWq4,3113
 official/vision/dataloaders/utils_test.py,sha256=8gG1b0Wx8mjvAxEEnqSOBIXOGNGQM0zAKpX815umKu4,2567
-official/vision/dataloaders/video_input.py,sha256=
+official/vision/dataloaders/video_input.py,sha256=4TuPmV3W0c5vcNPeQNX8FdlCXqz3fGecss9vMi5WjJs,17756
 official/vision/dataloaders/video_input_test.py,sha256=fhHh5lft0KthdJ0_6n3kP3dZ6y8GLkji1Rlq4zP0jbU,7551
 official/vision/evaluation/__init__.py,sha256=7oiypy0N82PDw9aSdcJBLVoGTd_oRSUOdvuJhMv4leQ,609
 official/vision/evaluation/coco_evaluator.py,sha256=Nv6ElDBw2CSHFfogWpnAgCnIlMwmt5SWDbt-YeIvR3s,15601
@@ -1118,8 +1118,8 @@ official/vision/ops/mask_ops.py,sha256=cZLpIowzEA57bXPDbVXa6mktZVHvGSH-TQ1CxHjpQ
 official/vision/ops/mask_ops_test.py,sha256=D3xbbbleJd4HkpWOSDSEy6hNihsRBY93BqPF6JP-dJk,2835
 official/vision/ops/nms.py,sha256=bKYDAtyV5j6PG7g-RGF2ZccCI5V1xVvuajNblCy1TGs,8125
 official/vision/ops/preprocess_ops.py,sha256=tDW9apUocwSzGnKkgSrKGwqbcZpb6-WQ8nGnW1_ds20,42569
-official/vision/ops/preprocess_ops_3d.py,sha256=
-official/vision/ops/preprocess_ops_3d_test.py,sha256=
+official/vision/ops/preprocess_ops_3d.py,sha256=o0ivTBK2BwaiPp_NhVG_yBc10VUySxfE7eKQkL7RNaU,16762
+official/vision/ops/preprocess_ops_3d_test.py,sha256=LA3-Ue4FTjdsr5Kl_BnpAMNcfikWylMisD2GrBTOLzA,9309
 official/vision/ops/preprocess_ops_test.py,sha256=FY3EUvQIb82fYqYJPmzkE7pmkhXIQrd7JNLGFPB6SXI,17213
 official/vision/ops/sampling_ops.py,sha256=1jywCA_E4qDUFWsykCLUwZsWtQSR0QREXdJhvP5qCvc,16072
 official/vision/ops/spatial_transform_ops.py,sha256=PVEJGAn0ygtsrid84vD5GgV0jsjyWoNn14RBzreMxM4,38389
@@ -1206,9 +1206,9 @@ tensorflow_models/tensorflow_models_test.py,sha256=nc6A9K53OGqF25xN5St8EiWvdVbda
 tensorflow_models/nlp/__init__.py,sha256=4tA5Pf4qaFwT-fIFOpX7x7FHJpnyJT-5UgOeFYTyMlc,807
 tensorflow_models/uplift/__init__.py,sha256=mqfa55gweOdpKoaQyid4A_4u7xw__FcQeSIF0k_pYmI,999
 tensorflow_models/vision/__init__.py,sha256=zBorY_v5xva1uI-qxhZO3Qh-Dii-Suq6wEYh6hKHDfc,833
-tf_models_nightly-2.17.0.
-tf_models_nightly-2.17.0.
-tf_models_nightly-2.17.0.
-tf_models_nightly-2.17.0.
-tf_models_nightly-2.17.0.
-tf_models_nightly-2.17.0.
+tf_models_nightly-2.17.0.dev20240423.dist-info/AUTHORS,sha256=1dG3fXVu9jlo7bul8xuix5F5vOnczMk7_yWn4y70uw0,337
+tf_models_nightly-2.17.0.dev20240423.dist-info/LICENSE,sha256=WxeBS_DejPZQabxtfMOM_xn8qoZNJDQjrT7z2wG1I4U,11512
+tf_models_nightly-2.17.0.dev20240423.dist-info/METADATA,sha256=kTJSu84yICl-fJ2_SP7HYjLNi0CEFgUfn2IULGSkst0,1432
+tf_models_nightly-2.17.0.dev20240423.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110
+tf_models_nightly-2.17.0.dev20240423.dist-info/top_level.txt,sha256=gum2FfO5R4cvjl2-QtP-S1aNmsvIZaFFT6VFzU0f4-g,33
+tf_models_nightly-2.17.0.dev20240423.dist-info/RECORD,,
The remaining dist-info files (AUTHORS, LICENSE, WHEEL, top_level.txt) carry no content changes.