tf-models-nightly 2.17.0.dev20240314__py2.py3-none-any.whl → 2.17.0.dev20240321__py2.py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
--- a/official/nlp/modeling/layers/__init__.py
+++ b/official/nlp/modeling/layers/__init__.py
@@ -46,6 +46,7 @@ from official.nlp.modeling.layers.moe import MoeLayerWithBackbone
 from official.nlp.modeling.layers.multi_channel_attention import *
 from official.nlp.modeling.layers.on_device_embedding import OnDeviceEmbedding
 from official.nlp.modeling.layers.pack_optimization import PackBertEmbeddings
+from official.nlp.modeling.layers.pack_optimization import StridedReZeroTransformer
 from official.nlp.modeling.layers.pack_optimization import StridedTransformerEncoderBlock
 from official.nlp.modeling.layers.pack_optimization import StridedTransformerScaffold
 from official.nlp.modeling.layers.per_dim_scale_attention import PerDimScaleAttention
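With the added re-export, the layer is importable from the layers package root. A minimal sketch, assuming the 2.17.0.dev20240321 nightly is installed:

```python
# Sketch: the re-export added above makes this import path resolve
# (assuming the 2.17.0.dev20240321 nightly).
from official.nlp.modeling.layers import StridedReZeroTransformer
```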
--- a/official/vision/ops/anchor.py
+++ b/official/vision/ops/anchor.py
@@ -22,7 +22,6 @@ from typing import Dict, Optional, Tuple
 
 import tensorflow as tf, tf_keras
 
-from official.vision.ops import anchor_generator
 from official.vision.ops import box_matcher
 from official.vision.ops import iou_similarity
 from official.vision.ops import target_gather
@@ -32,7 +31,38 @@ from official.vision.utils.object_detection import faster_rcnn_box_coder
 
 
 class Anchor(object):
-  """Anchor class for anchor-based object detectors."""
+  """Anchor class for anchor-based object detectors.
+
+  Example:
+  ```python
+  anchor_boxes = Anchor(
+      min_level=3,
+      max_level=4,
+      num_scales=2,
+      aspect_ratios=[0.5, 1., 2.],
+      anchor_size=4.,
+      image_size=[256, 256],
+  ).multilevel_boxes
+  ```
+
+  Attributes:
+    min_level: integer number of minimum level of the output feature pyramid.
+    max_level: integer number of maximum level of the output feature pyramid.
+    num_scales: integer number representing intermediate scales added on each
+      level. For instances, num_scales=2 adds one additional intermediate
+      anchor scales [2^0, 2^0.5] on each level.
+    aspect_ratios: list of float numbers representing the aspect ratio anchors
+      added on each level. The number indicates the ratio of width to height.
+      For instances, aspect_ratios=[1.0, 2.0, 0.5] adds three anchors on each
+      scale level.
+    anchor_size: float number representing the scale of size of the base
+      anchor to the feature stride 2^level.
+    image_size: a list of integer numbers or Tensors representing [height,
+      width] of the input image size.
+    multilevel_boxes: an OrderedDict from level to the generated anchor boxes of
+      shape [height_l, width_l, num_anchors_per_location * 4].
+    anchors_per_location: number of anchors per pixel location.
+  """
 
   def __init__(
       self,
@@ -43,57 +73,40 @@ class Anchor(object):
       anchor_size,
       image_size,
   ):
-    """Constructs multi-scale anchors.
-
-    Args:
-      min_level: integer number of minimum level of the output feature pyramid.
-      max_level: integer number of maximum level of the output feature pyramid.
-      num_scales: integer number representing intermediate scales added on each
-        level. For instances, num_scales=2 adds one additional intermediate
-        anchor scales [2^0, 2^0.5] on each level.
-      aspect_ratios: list of float numbers representing the aspect ratio anchors
-        added on each level. The number indicates the ratio of width to height.
-        For instances, aspect_ratios=[1.0, 2.0, 0.5] adds three anchors on each
-        scale level.
-      anchor_size: float number representing the scale of size of the base
-        anchor to the feature stride 2^level.
-      image_size: a list of integer numbers or Tensors representing [height,
-        width] of the input image size.The image_size should be divided by the
-        largest feature stride 2^max_level.
-    """
+    """Initializes the instance."""
     self.min_level = min_level
     self.max_level = max_level
     self.num_scales = num_scales
     self.aspect_ratios = aspect_ratios
     self.anchor_size = anchor_size
     self.image_size = image_size
-    self.boxes = self._generate_boxes()
+    self.multilevel_boxes = self._generate_multilevel_boxes()
 
-  def _generate_boxes(self) -> tf.Tensor:
+  def _generate_multilevel_boxes(self) -> Dict[str, tf.Tensor]:
     """Generates multi-scale anchor boxes.
 
     Returns:
-      a Tensor of shape [N, 4], representing anchor boxes of all levels
-      concatenated together.
+      An OrderedDict from level to anchor boxes of shape [height_l, width_l,
+      num_anchors_per_location * 4].
     """
-    boxes_all = []
+    multilevel_boxes = collections.OrderedDict()
     for level in range(self.min_level, self.max_level + 1):
       boxes_l = []
-      feat_size = math.ceil(self.image_size[0] / 2**level)
-      stride = tf.cast(self.image_size[0] / feat_size, tf.float32)
+      feat_size_y = math.ceil(self.image_size[0] / 2**level)
+      feat_size_x = math.ceil(self.image_size[1] / 2**level)
+      stride_y = tf.cast(self.image_size[0] / feat_size_y, tf.float32)
+      stride_x = tf.cast(self.image_size[1] / feat_size_x, tf.float32)
+      x = tf.range(stride_x / 2, self.image_size[1], stride_x)
+      y = tf.range(stride_y / 2, self.image_size[0], stride_y)
+      xv, yv = tf.meshgrid(x, y)
       for scale in range(self.num_scales):
         for aspect_ratio in self.aspect_ratios:
-          intermidate_scale = 2 ** (scale / float(self.num_scales))
-          base_anchor_size = self.anchor_size * stride * intermidate_scale
+          intermidate_scale = 2 ** (scale / self.num_scales)
+          base_anchor_size = self.anchor_size * 2**level * intermidate_scale
           aspect_x = aspect_ratio**0.5
           aspect_y = aspect_ratio**-0.5
           half_anchor_size_x = base_anchor_size * aspect_x / 2.0
           half_anchor_size_y = base_anchor_size * aspect_y / 2.0
-          x = tf.range(stride / 2, self.image_size[1], stride)
-          y = tf.range(stride / 2, self.image_size[0], stride)
-          xv, yv = tf.meshgrid(x, y)
-          xv = tf.cast(tf.reshape(xv, [-1]), dtype=tf.float32)
-          yv = tf.cast(tf.reshape(yv, [-1]), dtype=tf.float32)
           # Tensor shape Nx4.
           boxes = tf.stack(
               [
@@ -102,41 +115,18 @@ class Anchor(object):
                   yv + half_anchor_size_y,
                   xv + half_anchor_size_x,
               ],
-              axis=1,
+              axis=-1,
           )
           boxes_l.append(boxes)
-      # Concat anchors on the same level to tensor shape NxAx4.
-      boxes_l = tf.stack(boxes_l, axis=1)
-      boxes_l = tf.reshape(boxes_l, [-1, 4])
-      boxes_all.append(boxes_l)
-    return tf.concat(boxes_all, axis=0)
-
-  def unpack_labels(self, labels: tf.Tensor) -> Dict[str, tf.Tensor]:
-    """Unpacks an array of labels into multi-scales labels."""
-    unpacked_labels = collections.OrderedDict()
-    count = 0
-    for level in range(self.min_level, self.max_level + 1):
-      feat_size_y = tf.cast(
-          math.ceil(self.image_size[0] / 2**level), tf.int32
-      )
-      feat_size_x = tf.cast(
-          math.ceil(self.image_size[1] / 2**level), tf.int32
-      )
-      steps = feat_size_y * feat_size_x * self.anchors_per_location
-      unpacked_labels[str(level)] = tf.reshape(
-          labels[count : count + steps], [feat_size_y, feat_size_x, -1]
-      )
-      count += steps
-    return unpacked_labels
+      # Concat anchors on the same level to tensor shape HxWx(Ax4).
+      boxes_l = tf.concat(boxes_l, axis=-1)
+      multilevel_boxes[str(level)] = boxes_l
+    return multilevel_boxes
 
   @property
-  def anchors_per_location(self):
+  def anchors_per_location(self) -> int:
     return self.num_scales * len(self.aspect_ratios)
 
-  @property
-  def multilevel_boxes(self):
-    return self.unpack_labels(self.boxes)
-
 
 class AnchorLabeler(object):
   """Labeler for dense object detector."""
@@ -420,24 +410,68 @@ class RpnAnchorLabeler(AnchorLabeler):
     return score_targets_dict, box_targets_dict
 
 
+class AnchorGeneratorv2:
+  """Utility to generate anchors for a multiple feature maps.
+
+  Attributes:
+    min_level: integer number of minimum level of the output feature pyramid.
+    max_level: integer number of maximum level of the output feature pyramid.
+    num_scales: integer number representing intermediate scales added on each
+      level. For instances, num_scales=2 adds one additional intermediate
+      anchor scales [2^0, 2^0.5] on each level.
+    aspect_ratios: list of float numbers representing the aspect ratio anchors
+      added on each level. The number indicates the ratio of width to height.
+      For instances, aspect_ratios=[1.0, 2.0, 0.5] adds three anchors on each
+      scale level.
+    anchor_size: float number representing the scale of size of the base
+      anchor to the feature stride 2^level.
+  """
+
+  def __init__(
+      self,
+      min_level,
+      max_level,
+      num_scales,
+      aspect_ratios,
+      anchor_size,
+  ):
+    """Initializes the instance."""
+    self.min_level = min_level
+    self.max_level = max_level
+    self.num_scales = num_scales
+    self.aspect_ratios = aspect_ratios
+    self.anchor_size = anchor_size
+
+  def __call__(self, image_size):
+    """Generate multilevel anchor boxes.
+
+    Args:
+      image_size: a list of integer numbers or Tensors representing [height,
+        width] of the input image size.
+    Returns:
+      An ordered dictionary from level to anchor boxes of shape [height_l,
+      width_l, num_anchors_per_location * 4].
+    """
+    return Anchor(
+        min_level=self.min_level,
+        max_level=self.max_level,
+        num_scales=self.num_scales,
+        aspect_ratios=self.aspect_ratios,
+        anchor_size=self.anchor_size,
+        image_size=image_size,
+    ).multilevel_boxes
+
+
 def build_anchor_generator(
     min_level, max_level, num_scales, aspect_ratios, anchor_size
 ):
   """Build anchor generator from levels."""
-  anchor_sizes = collections.OrderedDict()
-  strides = collections.OrderedDict()
-  scales = []
-  for scale in range(num_scales):
-    scales.append(2 ** (scale / float(num_scales)))
-  for level in range(min_level, max_level + 1):
-    stride = 2**level
-    strides[str(level)] = stride
-    anchor_sizes[str(level)] = anchor_size * stride
-  anchor_gen = anchor_generator.AnchorGenerator(
-      anchor_sizes=anchor_sizes,
-      scales=scales,
+  anchor_gen = AnchorGeneratorv2(
+      min_level=min_level,
+      max_level=max_level,
+      num_scales=num_scales,
       aspect_ratios=aspect_ratios,
-      strides=strides,
+      anchor_size=anchor_size,
   )
   return anchor_gen
 
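`AnchorGeneratorv2` defers `Anchor` construction to call time, so a single generator can serve inputs of different sizes. A short sketch of the rewritten factory, assuming the signatures in the hunk above:

```python
from official.vision.ops import anchor

# Sketch: build_anchor_generator now returns an AnchorGeneratorv2,
# which takes image_size when called rather than at construction.
anchor_gen = anchor.build_anchor_generator(
    min_level=3,
    max_level=7,
    num_scales=3,
    aspect_ratios=[0.5, 1.0, 2.0],
    anchor_size=4.0,
)
multilevel_boxes = anchor_gen([512, 512])
# OrderedDict keyed by level: '3' -> [64, 64, 36] ... '7' -> [4, 4, 36],
# where 36 = 3 scales * 3 aspect ratios * 4 coordinates.
```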
--- a/official/vision/ops/anchor_generator.py
+++ b/official/vision/ops/anchor_generator.py
@@ -109,7 +109,7 @@ class _SingleAnchorGenerator:
     return tf.reshape(result, [shape[0], shape[1], shape[2] * shape[3]])
 
 
-class AnchorGenerator():
+class AnchorGeneratorv1():
   """Utility to generate anchors for a multiple feature maps.
 
   Example:
--- a/official/vision/ops/anchor_generator_test.py
+++ b/official/vision/ops/anchor_generator_test.py
@@ -77,7 +77,7 @@ class MultiScaleAnchorGeneratorTest(parameterized.TestCase, tf.test.TestCase):
     levels = range(min_level, max_level + 1)
     anchor_sizes = [2**(level + 1) for level in levels]
     strides = [2**level for level in levels]
-    anchor_gen = anchor_generator.AnchorGenerator(
+    anchor_gen = anchor_generator.AnchorGeneratorv1(
         anchor_sizes=anchor_sizes,
         scales=[1.],
         aspect_ratios=aspect_ratios,
@@ -98,7 +98,7 @@ class MultiScaleAnchorGeneratorTest(parameterized.TestCase, tf.test.TestCase):
     levels = range(min_level, max_level + 1)
     anchor_sizes = [2**(level + 1) for level in levels]
     strides = [2**level for level in levels]
-    anchor_gen = anchor_generator.AnchorGenerator(
+    anchor_gen = anchor_generator.AnchorGeneratorv1(
         anchor_sizes=anchor_sizes,
         scales=[1.],
         aspect_ratios=aspect_ratios,
@@ -122,7 +122,7 @@ class MultiScaleAnchorGeneratorTest(parameterized.TestCase, tf.test.TestCase):
     levels = range(min_level, max_level + 1)
     anchor_sizes = dict((str(level), 2**(level + 1)) for level in levels)
     strides = dict((str(level), 2**level) for level in levels)
-    anchor_gen = anchor_generator.AnchorGenerator(
+    anchor_gen = anchor_generator.AnchorGeneratorv1(
         anchor_sizes=anchor_sizes,
         scales=[1.],
         aspect_ratios=aspect_ratios,
--- a/official/vision/ops/anchor_test.py
+++ b/official/vision/ops/anchor_test.py
@@ -58,40 +58,52 @@ class AnchorTest(parameterized.TestCase, tf.test.TestCase):
     self.assertEqual(negatives, expected_negatives)
 
   @parameterized.parameters(
-      # Single scale anchor.
-      (5, 5, 1, [1.0], 2.0,
-       [[-16, -16, 48, 48], [-16, 16, 48, 80],
-        [16, -16, 80, 48], [16, 16, 80, 80]]),
-      # Multi scale anchor.
-      (5, 6, 1, [1.0], 2.0,
-       [[-16, -16, 48, 48], [-16, 16, 48, 80],
-        [16, -16, 80, 48], [16, 16, 80, 80], [-32, -32, 96, 96]]),
-      # # Multi aspect ratio anchor.
-      (6, 6, 1, [1.0, 4.0, 0.25], 2.0,
-       [[-32, -32, 96, 96], [-0, -96, 64, 160], [-96, -0, 160, 64]]),
-
+      # Single scale anchor
+      (5, 5, 1, [1.0], 2.0, [64, 64],
+       {'5': [[[-16, -16, 48, 48], [-16, 16, 48, 80]],
+              [[16, -16, 80, 48], [16, 16, 80, 80]]]}),
+      # Multi scale anchor
+      (5, 6, 1, [1.0], 2.0, [64, 64],
+       {'5': [[[-16, -16, 48, 48], [-16, 16, 48, 80]],
+              [[16, -16, 80, 48], [16, 16, 80, 80]]],
+        '6': [[[-32, -32, 96, 96]]]}),
+      # Multi aspect ratio anchor
+      (6, 6, 1, [1.0, 4.0, 0.25], 2.0, [64, 64],
+       {'6': [[[-32, -32, 96, 96, -0, -96, 64, 160, -96, -0, 160, 64]]]}),
+      # Intermidate scales
+      (5, 5, 2, [1.0], 1.0, [32, 32],
+       {'5': [[[0, 0, 32, 32,
+                16 - 16 * 2**0.5, 16 - 16 * 2**0.5,
+                16 + 16 * 2**0.5, 16 + 16 * 2**0.5]]]}),
+      # Non-square
+      (5, 5, 1, [1.0], 1.0, [64, 32],
+       {'5': [[[0, 0, 32, 32]],
+              [[32, 0, 64, 32]]]}),
+      # Indivisible by 2^level
+      (5, 5, 1, [1.0], 1.0, [40, 32],
+       {'5': [[[-6, 0, 26, 32]],
+              [[14, 0, 46, 32]]]}),
   )
   def testAnchorGeneration(self, min_level, max_level, num_scales,
-                           aspect_ratios, anchor_size, expected_boxes):
-    image_size = [64, 64]
+                           aspect_ratios, anchor_size, image_size,
+                           expected_boxes):
    anchors = anchor.Anchor(min_level, max_level, num_scales, aspect_ratios,
                            anchor_size, image_size)
-    boxes = anchors.boxes.numpy()
-    self.assertEqual(expected_boxes, boxes.tolist())
+    self.assertAllClose(expected_boxes, anchors.multilevel_boxes)
 
   @parameterized.parameters(
       # Single scale anchor.
       (5, 5, 1, [1.0], 2.0,
-       [[-16, -16, 48, 48], [-16, 16, 48, 80],
-        [16, -16, 80, 48], [16, 16, 80, 80]]),
+       {'5': [[[-16, -16, 48, 48], [-16, 16, 48, 80]],
+              [[16, -16, 80, 48], [16, 16, 80, 80]]]}),
       # Multi scale anchor.
       (5, 6, 1, [1.0], 2.0,
-       [[-16, -16, 48, 48], [-16, 16, 48, 80],
-        [16, -16, 80, 48], [16, 16, 80, 80], [-32, -32, 96, 96]]),
-      # # Multi aspect ratio anchor.
+       {'5': [[[-16, -16, 48, 48], [-16, 16, 48, 80]],
+              [[16, -16, 80, 48], [16, 16, 80, 80]]],
+        '6': [[[-32, -32, 96, 96]]]}),
+      # Multi aspect ratio anchor.
       (6, 6, 1, [1.0, 4.0, 0.25], 2.0,
-       [[-32, -32, 96, 96], [-0, -96, 64, 160], [-96, -0, 160, 64]]),
-
+       {'6': [[[-32, -32, 96, 96, -0, -96, 64, 160, -96, -0, 160, 64]]]}),
   )
   def testAnchorGenerationWithImageSizeAsTensor(self,
                                                 min_level,
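The level-5 expectations above can be checked by hand: with anchor_size 2.0 the base anchor spans 2.0 * 2^5 = 64 pixels, and grid centers sit at half-stride offsets 16 and 48 in a 64x64 image, so the top-left location yields [-16, -16, 48, 48]. A sketch of that arithmetic:

```python
# Hand-check of the single-scale, aspect-ratio-1.0 case (level 5,
# anchor_size 2.0, image_size [64, 64]) from the parameters above.
level, anchor_size, image_dim = 5, 2.0, 64
stride = 2**level                                # 32
base = anchor_size * 2**level                    # 64
centers = [stride / 2 + i * stride
           for i in range(image_dim // stride)]  # [16.0, 48.0]
half = base / 2                                  # 32.0
box = [centers[0] - half, centers[0] - half,
       centers[0] + half, centers[0] + half]     # ymin, xmin, ymax, xmax
print(box)  # [-16.0, -16.0, 48.0, 48.0] -> first expected box for '5'
```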
@@ -103,8 +115,25 @@ class AnchorTest(parameterized.TestCase, tf.test.TestCase):
     image_size = tf.constant([64, 64], tf.int32)
     anchors = anchor.Anchor(min_level, max_level, num_scales, aspect_ratios,
                             anchor_size, image_size)
-    boxes = anchors.boxes.numpy()
-    self.assertEqual(expected_boxes, boxes.tolist())
+    self.assertAllClose(expected_boxes, anchors.multilevel_boxes)
+
+  @parameterized.parameters(
+      (6, 8, 2, [1.0, 2.0, 0.5], 3.0, [320, 256]),
+  )
+  def testAnchorGenerationAreCentered(self, min_level, max_level, num_scales,
+                                      aspect_ratios, anchor_size, image_size):
+    anchors = anchor.Anchor(min_level, max_level, num_scales, aspect_ratios,
+                            anchor_size, image_size)
+    multilevel_boxes = anchors.multilevel_boxes
+    image_size = np.array(image_size)
+    for boxes in multilevel_boxes.values():
+      boxes = boxes.numpy()
+      box_centers = boxes.mean(axis=0).mean(axis=0)
+      box_centers = [
+          (box_centers[0] + box_centers[2]) / 2,
+          (box_centers[1] + box_centers[3]) / 2,
+      ]
+      self.assertAllClose(image_size / 2, box_centers)
 
   @parameterized.parameters(
       (3, 6, 2, [1.0], 2.0, False),
@@ -164,6 +193,7 @@ class AnchorTest(parameterized.TestCase, tf.test.TestCase):
       (3, 7, [.5, 1., 2.], 2, 8, (256, 256)),
       (3, 8, [1.], 3, 32, (512, 512)),
       (3, 3, [1.], 2, 4, (32, 32)),
+      (4, 8, [.5, 1., 2.], 2, 3, (320, 256)),
   )
   def testEquivalentResult(self, min_level, max_level, aspect_ratios,
                            num_scales, anchor_size, image_size):
--- a/tf_models_nightly-2.17.0.dev20240314.dist-info/METADATA
+++ b/tf_models_nightly-2.17.0.dev20240321.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: tf-models-nightly
-Version: 2.17.0.dev20240314
+Version: 2.17.0.dev20240321
 Summary: TensorFlow Official Models
 Home-page: https://github.com/tensorflow/models
 Author: Google Inc.
--- a/tf_models_nightly-2.17.0.dev20240314.dist-info/RECORD
+++ b/tf_models_nightly-2.17.0.dev20240321.dist-info/RECORD
@@ -298,7 +298,7 @@ official/nlp/metrics/__init__.py,sha256=7oiypy0N82PDw9aSdcJBLVoGTd_oRSUOdvuJhMv4
 official/nlp/metrics/bleu.py,sha256=XOTTbjC3B9lt8-MLvNX02tjA94wfsUVse6KJ5CWPzfk,6587
 official/nlp/metrics/bleu_test.py,sha256=0j4pZ1MSIcndvUNZa25oXCu4UFOE367KaL7oRNCzLCI,2508
 official/nlp/modeling/__init__.py,sha256=SQozaRl78tYS6xvGCfM3msABe2VL20x_mL2vIln1Sn0,1062
-official/nlp/modeling/layers/__init__.py,sha256=ylm8koecxcv2OtyYxZWr2asrGi1IM9d1seYntRanN4g,4780
+official/nlp/modeling/layers/__init__.py,sha256=no0uyA68hsEDa_UuUdhC4jXeqb4lj-3dp6j0GTDzrIQ,4864
 official/nlp/modeling/layers/attention.py,sha256=3-jG3m_L9Y41BY35c4uTFG_Ywlfk4SOwUEtmqfSoKkk,3906
 official/nlp/modeling/layers/attention_test.py,sha256=c7KezuYUze8PWAPuwYow8KTQNRyuuJgwICSsFTyJ2nQ,3536
 official/nlp/modeling/layers/bigbird_attention.py,sha256=dzutgRoQt2DFsYMpMILv_QF0O_FMDbiLQ3T-7c1Zpcs,21111
@@ -1101,10 +1101,10 @@ official/vision/modeling/layers/roi_generator.py,sha256=ZL-m5IObg8JsXJ_DZ0QQusv2
 official/vision/modeling/layers/roi_sampler.py,sha256=uajmniFsZrmAR_-ojva_dVm9xZU3wze5EqCryP9gUoQ,10006
 official/vision/modeling/models/__init__.py,sha256=9tSOgQ3WTOmg91vIOdiAzpcHCvMJ76nFqCDyQR095k4,1020
 official/vision/ops/__init__.py,sha256=7oiypy0N82PDw9aSdcJBLVoGTd_oRSUOdvuJhMv4leQ,609
-official/vision/ops/anchor.py,sha256=ruGKslsQid6A0yfiJuG6sf_JEbJuHHpWL77D9s3SNx4,19627
-official/vision/ops/anchor_generator.py,sha256=293RH42qBk8zMtfiJovtBJ6Hn9yi4Ln-Az-sGXNAOR0,7274
-official/vision/ops/anchor_generator_test.py,sha256=grVbHuxlu1W7xbjMErV3q0ARxCesHN6q-7LvLcCi-_4,5296
-official/vision/ops/anchor_test.py,sha256=OQPiWHNUkQi3KaMWzOYd9HuqJa9vQ6Za4_6tf79X0qY,7633
+official/vision/ops/anchor.py,sha256=awu5WOXwTK8H5j2MppnRQcvPHNXSCiBnKcJCnISNfnE,20631
+official/vision/ops/anchor_generator.py,sha256=cN2k9rw4EpHPGrtSm8Ea51QERPy6WB1XGwYKjAkP8Uw,7276
+official/vision/ops/anchor_generator_test.py,sha256=qL4LCRr7UzwWlXmWNuS_yVwXe0ygNdMwYOz5T6Mrar4,5302
+official/vision/ops/anchor_test.py,sha256=PP2UNanwQjdiGxGaL9FZjU0vqTUbOp_Ni_1Kedga9aU,9015
 official/vision/ops/augment.py,sha256=20K-OWV_GLbpzv8sY5rBe6x3H5MIRbTlDWOPTfNlGeY,108290
 official/vision/ops/augment_test.py,sha256=9QISeqJ79JgkI2-Hn4pMxPRRin3mbOd7mSJ4v7S8W6E,25881
 official/vision/ops/box_matcher.py,sha256=AvZd7CUUZnT4FwETLyVz3Uxb3gO-o94OwlZrvm7CtX0,9067
@@ -1203,9 +1203,9 @@ tensorflow_models/__init__.py,sha256=etxw45SHxuwFCRX5qGxGMP83II0JfJulzNl5GSNJvhw
 tensorflow_models/tensorflow_models_test.py,sha256=AxUYUdiQn416UR7jg0h6rmv688esvlKDfpyDCIQkF18,1395
 tensorflow_models/nlp/__init__.py,sha256=4tA5Pf4qaFwT-fIFOpX7x7FHJpnyJT-5UgOeFYTyMlc,807
 tensorflow_models/vision/__init__.py,sha256=zBorY_v5xva1uI-qxhZO3Qh-Dii-Suq6wEYh6hKHDfc,833
-tf_models_nightly-2.17.0.dev20240314.dist-info/AUTHORS,sha256=1dG3fXVu9jlo7bul8xuix5F5vOnczMk7_yWn4y70uw0,337
-tf_models_nightly-2.17.0.dev20240314.dist-info/LICENSE,sha256=WxeBS_DejPZQabxtfMOM_xn8qoZNJDQjrT7z2wG1I4U,11512
-tf_models_nightly-2.17.0.dev20240314.dist-info/METADATA,sha256=rG9SC_Q5R5o4KDAZzsZhuLMumj4osPggvZYK-8MIPUM,1432
-tf_models_nightly-2.17.0.dev20240314.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110
-tf_models_nightly-2.17.0.dev20240314.dist-info/top_level.txt,sha256=gum2FfO5R4cvjl2-QtP-S1aNmsvIZaFFT6VFzU0f4-g,33
-tf_models_nightly-2.17.0.dev20240314.dist-info/RECORD,,
+tf_models_nightly-2.17.0.dev20240321.dist-info/AUTHORS,sha256=1dG3fXVu9jlo7bul8xuix5F5vOnczMk7_yWn4y70uw0,337
+tf_models_nightly-2.17.0.dev20240321.dist-info/LICENSE,sha256=WxeBS_DejPZQabxtfMOM_xn8qoZNJDQjrT7z2wG1I4U,11512
+tf_models_nightly-2.17.0.dev20240321.dist-info/METADATA,sha256=K7uiykWci-B4Hj5RVahhRjPgnNtursErdtdzOEWG-x4,1432
+tf_models_nightly-2.17.0.dev20240321.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110
+tf_models_nightly-2.17.0.dev20240321.dist-info/top_level.txt,sha256=gum2FfO5R4cvjl2-QtP-S1aNmsvIZaFFT6VFzU0f4-g,33
+tf_models_nightly-2.17.0.dev20240321.dist-info/RECORD,,