tf-models-nightly 2.14.0.dev20231107__py2.py3-none-any.whl → 2.14.0.dev20231109__py2.py3-none-any.whl

This diff shows the changes between two publicly released versions of this package, as published to a supported registry. It is provided for informational purposes only.
official/legacy/transformer/transformer_main_test.py CHANGED
@@ -17,7 +17,7 @@
 import os
 import re
 import sys
-import unittest
+import googletest
 
 from absl import flags
 from absl.testing import flagsaver
@@ -100,20 +100,20 @@ class TransformerTaskTest(tf.test.TestCase):
     t = transformer_main.TransformerTask(FLAGS)
     t.train()
 
-  @unittest.skipUnless(tf.test.is_built_with_cuda(), 'requires GPU')
+  @googletest.skipUnless(tf.test.is_built_with_cuda(), 'requires GPU')
   def test_train_1_gpu_with_dist_strat(self):
     FLAGS.distribution_strategy = 'one_device'
     t = transformer_main.TransformerTask(FLAGS)
     t.train()
 
-  @unittest.skipUnless(tf.test.is_built_with_cuda(), 'requires GPU')
+  @googletest.skipUnless(tf.test.is_built_with_cuda(), 'requires GPU')
   def test_train_fp16(self):
     FLAGS.distribution_strategy = 'one_device'
     FLAGS.dtype = 'fp16'
     t = transformer_main.TransformerTask(FLAGS)
     t.train()
 
-  @unittest.skipUnless(tf.test.is_built_with_cuda(), 'requires GPU')
+  @googletest.skipUnless(tf.test.is_built_with_cuda(), 'requires GPU')
   def test_train_2_gpu(self):
     if context.num_gpus() < 2:
       self.skipTest(
@@ -125,7 +125,7 @@ class TransformerTaskTest(tf.test.TestCase):
     t = transformer_main.TransformerTask(FLAGS)
     t.train()
 
-  @unittest.skipUnless(tf.test.is_built_with_cuda(), 'requires GPU')
+  @googletest.skipUnless(tf.test.is_built_with_cuda(), 'requires GPU')
   def test_train_2_gpu_fp16(self):
     if context.num_gpus() < 2:
       self.skipTest(
@@ -171,7 +171,7 @@ class TransformerTaskTest(tf.test.TestCase):
     t = transformer_main.TransformerTask(FLAGS)
     t.predict()
 
-  @unittest.skipUnless(tf.test.is_built_with_cuda(), 'requires GPU')
+  @googletest.skipUnless(tf.test.is_built_with_cuda(), 'requires GPU')
   def test_predict_fp16(self):
     if context.num_gpus() >= 2:
       self.skipTest('No need to test 2+ GPUs without a distribution strategy.')
official/modeling/privacy/ops_test.py CHANGED
@@ -14,7 +14,7 @@
 
 """Tests for ops."""
 
-from unittest import mock
+from googletest import mock
 
 import tensorflow as tf, tf_keras
 
official/projects/triviaqa/download_and_prepare.py CHANGED
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 """Downloads and prepares TriviaQA dataset."""
-from unittest import mock
+from googletest import mock
 
 from absl import app
 from absl import flags
official/projects/waste_identification_ml/__init__.py ADDED
@@ -0,0 +1,14 @@
+# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
official/projects/waste_identification_ml/data_generation/__init__.py ADDED
@@ -0,0 +1,14 @@
+# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
official/projects/waste_identification_ml/data_generation/utils.py ADDED
@@ -0,0 +1,347 @@
+# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Utility functions for the automated mask generation script."""
+
+import random
+from typing import Any
+import imantics
+import matplotlib.pyplot as plt
+import numpy as np
+
+
+def plot_image(image: np.ndarray):
+  """Plots a mask image.
+
+  Args:
+    image: A numpy array of shape (height, width) representing a mask.
+  """
+  plt.figure(figsize=(24, 32))
+  plt.imshow(image, cmap='gray')
+  plt.show()
+
+
+def _show_anns(anns: list[dict[str, Any]]):
+  """Displays annotations on an image.
+
+  Args:
+    anns: A list of dictionaries representing annotations.
+
+  Returns:
+    None.
+  """
+  if not anns:
+    return
+  sorted_anns = sorted(anns, key=lambda x: x['area'], reverse=True)
+  ax = plt.gca()
+  ax.set_autoscale_on(False)
+  for ann in sorted_anns:
+    m = ann['segmentation']
+    img = np.ones((m.shape[0], m.shape[1], 3))
+    random.seed()
+    color_mask = np.random.random((1, 3)).tolist()[0]
+    for i in range(3):
+      img[:, :, i] = color_mask[i]
+    ax.imshow(np.dstack((img, m * 0.35)))
+
+
+def display_image_with_annotations(
+    image: np.ndarray, masks: list[dict[str, Any]]
+):
+  """Displays an image with annotations.
+
+  Args:
+    image: A numpy array of shape (height, width, 3) representing an image.
+    masks: A list of dictionaries representing masks.
+
+  Returns:
+    None.
+  """
+  plt.figure(figsize=(24, 32))
+  plt.imshow(image)
+  _show_anns(masks)
+  plt.axis('off')
+  plt.show()
+
+
+def plot_grid(images: list[np.ndarray], n_cols: int):
+  """Plots a list of images in a grid with a given number of images per row.
+
+  Args:
+    images: A list of numpy arrays representing images.
+    n_cols: The number of images per row.
+
+  Returns:
+    None.
+  """
+  images = [np.array(item['segmentation'], dtype=float) for item in images]
+  n_rows = int(np.ceil(len(images) / n_cols))
+  _, axes = plt.subplots(nrows=n_rows, ncols=n_cols, figsize=(10, 10))
+  axes = axes.flatten()
+
+  for i, image in enumerate(images):
+    axes[i].imshow(image, cmap='gray')
+    axes[i].set_axis_off()
+
+  plt.tight_layout()
+  plt.show()
+
+
+def convert_bbox_format(coord: list[float]) -> list[float]:
+  """Convert bounding box format.
+
+  Convert bounding box coordinates from x, y, width, height to
+  xmin, ymin, xmax, ymax format.
+
+  Args:
+    coord: A list or tuple containing the coordinates in (x, y, width, height)
+      format.
+
+  Returns:
+    A list containing the coordinates in (xmin, ymin, xmax, ymax) format.
+  """
+  xmin, ymin, width, height = coord
+  return [xmin, ymin, xmin + width, ymin + height]
+
+
+def _aspect_ratio(bbox: list[float]) -> float:
+  """Calculate the aspect ratio of a bounding box.
+
+  Args:
+    bbox: A list or tuple containing the coordinates in (xmin, ymin, xmax, ymax)
+      format.
+
+  Returns:
+    The aspect ratio, defined as the length of the longer side
+    divided by the length of the shorter side.
+  """
+  xmin, ymin, xmax, ymax = bbox
+  width, height = xmax - xmin, ymax - ymin
+  return max(width, height) / min(width, height)
+
+
+def _calculate_area_bounds(
+    elements: list[np.ndarray], upper_multiplier: int, lower_multiplier: int
+) -> tuple[float, float]:
+  """Calculate the upper and lower bounds for a specified key.
+
+  Args:
+    elements: A list of elements containing the specified key.
+    upper_multiplier: Multiplier to calculate the upper bound of IQR.
+    lower_multiplier: Multiplier to calculate the lower bound of IQR.
+
+  Returns:
+    A tuple containing the upper and lower bounds.
+  """
+  leng = [i['area'] for i in elements]
+
+  q1, _, q3 = np.percentile(leng, [25, 50, 75])
+  iqr = q3 - q1
+  upper_bound = q3 + upper_multiplier * iqr
+  lower_bound = q1 * lower_multiplier
+  return upper_bound, lower_bound
+
+
+def filter_masks(
+    image: np.ndarray,
+    elements: list[np.ndarray],
+    upper_multiplier: int,
+    lower_multiplier: int,
+    area_ratio_threshold: float,
+) -> list[np.ndarray]:
+  """Filter masks based on area bounds and aspect ratio.
+
+  Args:
+    image: Original image.
+    elements: List of elements with multiple attributes.
+    upper_multiplier: Multiplier to calculate the upper bound of IQR.
+    lower_multiplier: Multiplier to calculate the lower bound of IQR.
+    area_ratio_threshold: Threshold for the ratio of mask area to image area.
+
+  Returns:
+    List of filtered masks.
+  """
+  area_upper_bound, area_lower_bound = _calculate_area_bounds(
+      elements, upper_multiplier, lower_multiplier
+  )
+  threshold = area_ratio_threshold * np.prod(image.shape[:-1])
+  filtered_elements = []
+  for element in elements:
+    if (
+        area_lower_bound <= element['area'] <= area_upper_bound
+        and _aspect_ratio(element['bbox']) <= 2
+        and element['area'] <= threshold
+    ):
+      filtered_elements.append(element)
+  return filtered_elements
+
+
+def _calculate_intersection_score(
+    elem1: dict[str, Any], elem2: dict[str, Any]
+) -> float:
+  """Calculates the intersection score for two masks.
+
+  Args:
+    elem1: The first element.
+    elem2: The second element.
+
+  Returns:
+    The intersection score calculated as the ratio of the intersection
+    area to the area of the smaller mask.
+  """
+
+  # Check if the masks have the same dimensions.
+  if elem1['segmentation'].shape != elem2['segmentation'].shape:
+    raise ValueError('The masks must have the same dimensions.')
+
+  min_elem = elem1 if elem1['area'] < elem2['area'] else elem2
+  intersection = np.logical_and(elem1['segmentation'], elem2['segmentation'])
+  score = np.sum(intersection) / np.sum(min_elem['segmentation'])
+  return score
+
+
+def filter_nested_similar_masks(
+    elements: list[dict[str, Any]]
+) -> list[dict[str, Any]]:
+  """Filters out nested masks from a list of elements.
+
+  Args:
+    elements: A list of dictionaries representing elements.
+
+  Returns:
+    A list of dictionaries representing elements with nested masks filtered out.
+  """
+  retained_elements = []
+  handled_indices = (
+      set()
+  )  # To keep track of indices that have already been handled
+
+  for i, elem in enumerate(elements):
+    if i in handled_indices:
+      continue  # Skip elements that have already been handled
+
+    matching_indices = [i]  # Start with the current element
+
+    # Find all elements that match with the current element
+    for j, other_elem in enumerate(elements):
+      if i != j and _calculate_intersection_score(elem, other_elem) > 0.95:
+        matching_indices.append(j)
+
+    # If more than one element matched, find the one with the highest 'area'
+    # and add it to retained_elements
+    if len(matching_indices) > 1:
+      highest_area_index = max(
+          matching_indices, key=lambda idx: elements[idx]['area']
+      )
+      retained_elements.append(elements[highest_area_index])
+      handled_indices.update(
+          matching_indices
+      )  # Mark all matching indices as handled
+    else:
+      # If no matches were found, retain the current element
+      retained_elements.append(elem)
+      handled_indices.add(i)  # Mark the current index as handled
+
+  return retained_elements
+
+
+def generate_coco_json(
+    masks: list[np.ndarray],
+    image: np.ndarray,
+    category_name: str,
+    file_name: str,
+) -> dict[str, Any]:
+  """Generates a COCO JSON annotation.
+
+  Create a COCO formatted JSON file for the given masks, image, and
+  category name.
+
+  Args:
+    masks: A list of masks.
+    image: The image to which the masks correspond.
+    category_name: The name of the category for the masks.
+    file_name: The name of the file to save the COCO JSON to.
+
+  Returns:
+    A COCO JSON dictionary.
+  """
+  height, width = image.shape[:2]
+
+  # Initialize variables
+  mask = np.zeros((height, width), dtype=np.uint8)
+  images_dict = [{}]
+  categories_dict = [{}]
+  annotations_dict = []
+  annotation_id = 1
+
+  # Process masks
+  for sub_mask in masks:
+    # Convert mask to numpy array
+    mask_array = sub_mask.reshape(height, width).astype(np.uint8)
+
+    # Create Mask object and add it to the imantics_Image
+    mask_image = imantics.Mask(mask_array)
+
+    # Create imantics_Image object
+    imantics_image = imantics.Image(image)
+    imantics_image.add(mask_image, category=imantics.Category(category_name))
+
+    try:
+      # Export imantics_Image as COCO JSON
+      coco_json = imantics_image.export(style='coco')
+    except imantics.ExportError as exc:
+      print('Error:', exc)
+      continue
+
+    # Update images_dict and categories_dict
+    images_dict[0] = coco_json['images'][0]
+    categories_dict[0] = coco_json['categories'][0]
+
+    # Retrieve annotation information and modify the segmentation field
+    annotation = coco_json['annotations'][0]
+    annotation['segmentation'] = [max(annotation['segmentation'], key=len)]
+
+    # Check for valid segmentations and create annotation dictionary
+    if len(annotation['segmentation']) >= 1:
+      for segmentation in annotation['segmentation']:
+        if len(segmentation) > 4:
+          annotation_dict = {
+              'id': annotation_id,
+              'image_id': annotation['image_id'],
+              'category_id': annotation['category_id'],
+              'iscrowd': annotation['iscrowd'],
+              'area': annotation['area'],
+              'bbox': annotation['bbox'],
+              'segmentation': [segmentation],
+          }
+          annotations_dict.append(annotation_dict)
+          annotation_id += 1
+
+    # Free up memory
+    del mask_image, coco_json, imantics_image
+
+    # Add mask_array to the overall mask
+    mask += mask_array
+
+  # assign file name
+  images_dict[0]['file_name'] = file_name
+
+  # Create final COCO dictionary
+  coco_dict_final = {
+      'images': images_dict,
+      'categories': categories_dict,
+      'annotations': annotations_dict,
+  }
+
+  return coco_dict_final
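
For orientation, the helpers in the new utils.py compose into a small annotation pipeline. The sketch below is illustrative only and not part of the package: it assumes `sam_masks` is a list of dicts with 'segmentation' (an HxW boolean array), 'area', and 'bbox' in (x, y, width, height) order, roughly what an automatic mask generator produces, and `image` is an HxWx3 numpy array; the multiplier and threshold values are arbitrary placeholders.

from official.projects.waste_identification_ml.data_generation import utils

def build_coco_annotations(image, sam_masks, category_name, file_name):
  # _aspect_ratio expects (xmin, ymin, xmax, ymax) boxes, so convert from
  # the assumed (x, y, width, height) input format first.
  for m in sam_masks:
    m['bbox'] = utils.convert_bbox_format(m['bbox'])
  # Drop area outliers (IQR bounds), masks more than twice as long as they
  # are wide, and masks covering more than area_ratio_threshold of the image.
  kept = utils.filter_masks(image, sam_masks, upper_multiplier=2,
                            lower_multiplier=1, area_ratio_threshold=0.25)
  # Collapse masks that overlap almost completely, keeping the largest one.
  kept = utils.filter_nested_similar_masks(kept)
  # Export the surviving binary masks as one COCO-style dictionary.
  return utils.generate_coco_json([m['segmentation'] for m in kept],
                                  image, category_name, file_name)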
official/projects/waste_identification_ml/data_generation/utils_test.py ADDED
@@ -0,0 +1,165 @@
+# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import numpy as np
+import tensorflow as tf, tf_keras
+from official.projects.waste_identification_ml.data_generation import utils
+
+
+def compare_elements(elem_list1, elem_list2):
+  if len(elem_list1) != len(elem_list2):
+    return False
+
+  for elem1, elem2 in zip(elem_list1, elem_list2):
+    for key in elem1:
+      if key not in elem2:
+        return False
+      if isinstance(elem1[key], np.ndarray) or isinstance(
+          elem2[key], np.ndarray
+      ):
+        if not np.array_equal(elem1[key], elem2[key]):
+          return False
+      else:
+        if elem1[key] != elem2[key]:
+          return False
+
+  return True
+
+
+class MyTest(tf.test.TestCase):
+
+  def test_convert_coordinates(self):
+    coord = [10.0, 20.0, 30.0, 40.0]
+    expected_output = [10.0, 20.0, 40.0, 60.0]
+    actual_output = utils.convert_bbox_format(coord)
+    self.assertEqual(expected_output, actual_output)
+
+  def test_area_key(self):
+    masks = [{'area': 10.0}, {'area': 20.0}, {'area': 30.0}]
+    upper_multiplier = 1.5
+    lower_multiplier = 0.5
+    leng = [i['area'] for i in masks]
+    q1, _, q3 = np.percentile(leng, [25, 50, 75])
+    iqr = q3 - q1
+    expected_upper_bound = q3 + upper_multiplier * iqr
+    expected_lower_bound = q1 * lower_multiplier
+    actual_upper_bound, actual_lower_bound = utils._calculate_area_bounds(
+        masks, upper_multiplier, lower_multiplier
+    )
+    self.assertEqual(
+        (expected_upper_bound, expected_lower_bound),
+        (actual_upper_bound, actual_lower_bound),
+    )
+
+  def test_square_bbox(self):
+    bbox = [0.0, 0.0, 2.0, 2.0]
+    expected_ratio = 1.0
+    actual_ratio = utils._aspect_ratio(bbox)
+    self.assertEqual(expected_ratio, actual_ratio)
+
+  def test_same_size_masks(self):
+    elem1 = {
+        'segmentation': np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]),
+        'area': 3,
+    }
+
+    elem2 = {
+        'segmentation': np.array([[1, 1, 0], [0, 0, 0], [0, 0, 1]]),
+        'area': 3,
+    }
+
+    expected_score = 2.0 / 3.0  # Intersection is 2, smaller mask area is 3
+    actual_score = utils._calculate_intersection_score(elem1, elem2)
+    self.assertAlmostEqual(expected_score, actual_score)
+
+  def test_different_size_masks_error(self):
+    elem1 = {'segmentation': np.array([[1, 0], [0, 1]]), 'area': 2}
+
+    elem2 = {
+        'segmentation': np.array([[1, 1, 0], [0, 0, 0], [0, 0, 1]]),
+        'area': 3,
+    }
+
+    with self.assertRaises(ValueError) as context:
+      utils._calculate_intersection_score(elem1, elem2)
+
+    self.assertEqual(
+        str(context.exception), 'The masks must have the same dimensions.'
+    )
+
+
+class TestFilterNestedSimilarMasks(tf.test.TestCase):
+
+  def setUp(self):
+    super().setUp()
+    # Create some mock binary mask data to use in the tests
+    self.mask1 = np.array([[0, 1], [1, 0]])
+    self.mask2 = np.array([[1, 0], [0, 1]])
+    self.larger_mask = np.array([[1, 1], [1, 1]])
+
+  def test_same_size_masks(self):
+    # Test the case where all masks are of the same size
+    elements = [
+        {'segmentation': self.mask1, 'area': 2},
+        {'segmentation': self.mask2, 'area': 2},
+    ]
+    expected_output = elements  # All masks are retained as none are nested
+    actual_output = utils.filter_nested_similar_masks(elements)
+    self.assertEqual(actual_output, expected_output)
+
+  def test_nested_masks(self):
+    # Test the case where one mask is nested within another
+    elements = [
+        {'segmentation': self.mask1, 'area': 2},
+        {'segmentation': self.larger_mask, 'area': 4},
+    ]
+    expected_output = [{
+        'segmentation': self.larger_mask,
+        'area': 4,
+    }]  # Only the larger mask is retained
+    actual_output = utils.filter_nested_similar_masks(elements)
+    self.assertEqual(actual_output, expected_output)
+
+
+class TestGenerateCocoJson(tf.test.TestCase):
+
+  def setUp(self):
+    super().setUp()
+    self.image = np.array([[0, 1], [1, 0]])
+    self.masks = [np.array([[0, 1], [1, 0]]), np.array([[1, 0], [0, 1]])]
+    self.category_name = 'example_category'
+    self.file_name = 'example_file'
+
+  def test_generate_coco_json(self):
+    coco_dict = utils.generate_coco_json(
+        masks=self.masks,
+        image=self.image,
+        category_name=self.category_name,
+        file_name=self.file_name,
+    )
+
+    # Check the keys present in the output dictionary
+    self.assertIn('images', coco_dict)
+    self.assertIn('categories', coco_dict)
+    self.assertIn('annotations', coco_dict)
+
+    # Check the file name in the images dictionary
+    self.assertEqual(coco_dict['images'][0]['file_name'], self.file_name)
+
+    # Check the category name in the categories dictionary
+    self.assertEqual(coco_dict['categories'][0]['name'], self.category_name)
+
+
+if __name__ == '__main__':
+  tf.test.main()
official/recommendation/ncf_test.py CHANGED
@@ -18,7 +18,7 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
 
-import unittest
+import googletest
 
 import tensorflow as tf, tf_keras
 from tensorflow.python.eager import context  # pylint: disable=ungrouped-imports
@@ -50,7 +50,7 @@ class NcfTest(tf.test.TestCase):
 
   _BASE_END_TO_END_FLAGS = ['-batch_size', '1044', '-train_epochs', '1']
 
-  @unittest.mock.patch.object(rconst, 'SYNTHETIC_BATCHES_PER_EPOCH', 100)
+  @googletest.mock.patch.object(rconst, 'SYNTHETIC_BATCHES_PER_EPOCH', 100)
   def test_end_to_end_keras_no_dist_strat(self):
     integration.run_synthetic(
         ncf_keras_main.main,
@@ -58,14 +58,14 @@ class NcfTest(tf.test.TestCase):
         extra_flags=self._BASE_END_TO_END_FLAGS +
        ['-distribution_strategy', 'off'])
 
-  @unittest.mock.patch.object(rconst, 'SYNTHETIC_BATCHES_PER_EPOCH', 100)
+  @googletest.mock.patch.object(rconst, 'SYNTHETIC_BATCHES_PER_EPOCH', 100)
   def test_end_to_end_keras_dist_strat(self):
     integration.run_synthetic(
         ncf_keras_main.main,
         tmp_root=self.get_temp_dir(),
         extra_flags=self._BASE_END_TO_END_FLAGS + ['-num_gpus', '0'])
 
-  @unittest.mock.patch.object(rconst, 'SYNTHETIC_BATCHES_PER_EPOCH', 100)
+  @googletest.mock.patch.object(rconst, 'SYNTHETIC_BATCHES_PER_EPOCH', 100)
   def test_end_to_end_keras_dist_strat_ctl(self):
     flags = (
         self._BASE_END_TO_END_FLAGS + ['-num_gpus', '0'] +
@@ -73,7 +73,7 @@ class NcfTest(tf.test.TestCase):
     integration.run_synthetic(
         ncf_keras_main.main, tmp_root=self.get_temp_dir(), extra_flags=flags)
 
-  @unittest.mock.patch.object(rconst, 'SYNTHETIC_BATCHES_PER_EPOCH', 100)
+  @googletest.mock.patch.object(rconst, 'SYNTHETIC_BATCHES_PER_EPOCH', 100)
   def test_end_to_end_keras_1_gpu_dist_strat_fp16(self):
     if context.num_gpus() < 1:
       self.skipTest(
@@ -86,7 +86,7 @@ class NcfTest(tf.test.TestCase):
         extra_flags=self._BASE_END_TO_END_FLAGS +
        ['-num_gpus', '1', '--dtype', 'fp16'])
 
-  @unittest.mock.patch.object(rconst, 'SYNTHETIC_BATCHES_PER_EPOCH', 100)
+  @googletest.mock.patch.object(rconst, 'SYNTHETIC_BATCHES_PER_EPOCH', 100)
   def test_end_to_end_keras_1_gpu_dist_strat_ctl_fp16(self):
     if context.num_gpus() < 1:
       self.skipTest(
@@ -99,7 +99,7 @@ class NcfTest(tf.test.TestCase):
         extra_flags=self._BASE_END_TO_END_FLAGS +
        ['-num_gpus', '1', '--dtype', 'fp16', '--keras_use_ctl'])
 
-  @unittest.mock.patch.object(rconst, 'SYNTHETIC_BATCHES_PER_EPOCH', 100)
+  @googletest.mock.patch.object(rconst, 'SYNTHETIC_BATCHES_PER_EPOCH', 100)
   def test_end_to_end_keras_2_gpu_fp16(self):
     if context.num_gpus() < 2:
       self.skipTest(
official/utils/flags/flags_test.py CHANGED
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import unittest
+import googletest
 
 from absl import flags
 import tensorflow as tf, tf_keras
@@ -39,7 +39,7 @@ def define_flags():
   flags_core.define_benchmark()
 
 
-class BaseTester(unittest.TestCase):
+class BaseTester(googletest.TestCase):
 
   @classmethod
   def setUpClass(cls):
@@ -159,4 +159,4 @@ class BaseTester(unittest.TestCase):
 
 
 if __name__ == "__main__":
-  unittest.main()
+  googletest.main()
official/vision/data/image_utils_test.py CHANGED
@@ -14,7 +14,7 @@
 
 """Tests for image_utils."""
 import imghdr
-from unittest import mock
+from googletest import mock
 from absl.testing import parameterized
 import tensorflow as tf, tf_keras
 
official/vision/modeling/heads/dense_prediction_heads_test.py CHANGED
@@ -14,7 +14,7 @@
 
 """Tests for dense_prediction_heads.py."""
 
-import unittest
+import googletest
 
 # Import libraries
 
@@ -151,7 +151,7 @@ class RetinaNetHeadTest(parameterized.TestCase, tf.test.TestCase):
     self.assertLen(retinanet_head._att_convs['depth'], 1)
     self.assertEqual(retinanet_head._att_convs['depth'][0].filters, 128)
 
-  @unittest.expectedFailure
+  @googletest.expectedFailure
   def test_forward_shared_prediction_tower_with_share_classification_heads(
       self):
     share_classification_heads = True
official/vision/serving/export_saved_model_lib_test.py CHANGED
@@ -15,7 +15,7 @@
 """Tests for official.core.export_saved_model_lib."""
 
 import os
-from unittest import mock
+from googletest import mock
 
 import tensorflow as tf, tf_keras
 
orbit/controller.py CHANGED
@@ -321,6 +321,7 @@ class Controller:
       _log(f" eval | step: {current_step: 6d} | {steps_msg}")
 
     start = time.time()
+    assert isinstance(self.evaluator, runner.AbstractEvaluator)
     with self.eval_summary_manager.summary_writer().as_default():
       steps_tensor = tf.convert_to_tensor(steps, dtype=tf.int32)
       eval_output = self.evaluator.evaluate(steps_tensor)
@@ -428,6 +429,7 @@
     self._require("checkpoint_manager", for_method="evaluate_continuously")
 
     output = None
+    assert isinstance(self.checkpoint_manager, tf.train.CheckpointManager)
     for checkpoint_path in tf.train.checkpoints_iterator(
        self.checkpoint_manager.directory,
        timeout=timeout,
@@ -451,6 +453,7 @@
     """
     self._require("checkpoint_manager", for_method="restore_checkpoint")
 
+    assert isinstance(self.checkpoint_manager, tf.train.CheckpointManager)
     with self.strategy.scope():
       # Checkpoint restoring should be inside scope (b/139450638).
       if checkpoint_path is not None:
@@ -508,6 +511,7 @@
     if self.summary_interval:
       # Create a predicate to determine when summaries should be written.
      should_record = lambda: (self.global_step % self.summary_interval == 0)
+    assert isinstance(self.trainer, runner.AbstractTrainer)
     with tf.summary.record_if(should_record):
       num_steps_tensor = tf.convert_to_tensor(num_steps, dtype=tf.int32)
       train_output = self.trainer.train(num_steps_tensor)
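
The four added `assert isinstance(...)` lines each sit immediately before an attribute use and read as type-narrowing guards: they fail fast at runtime if the optional attribute was never configured, and they let a static type checker treat `Optional[X]` as `X` afterwards. A minimal sketch of the pattern, with illustrative names that are not part of Orbit:

from typing import Optional

class Evaluator:
  """Stand-in for an evaluator type; illustrative only."""

  def evaluate(self) -> dict:
    return {'loss': 0.0}

class Runner:
  def __init__(self, evaluator: Optional[Evaluator] = None):
    self.evaluator = evaluator

  def run_eval(self) -> dict:
    # Fails fast if no evaluator was configured, and narrows
    # Optional[Evaluator] to Evaluator for static type checkers.
    assert isinstance(self.evaluator, Evaluator)
    return self.evaluator.evaluate()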
tf_models_nightly-2.14.0.dev20231107.dist-info/METADATA → tf_models_nightly-2.14.0.dev20231109.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: tf-models-nightly
-Version: 2.14.0.dev20231107
+Version: 2.14.0.dev20231109
 Summary: TensorFlow Official Models
 Home-page: https://github.com/tensorflow/models
 Author: Google Inc.
tf_models_nightly-2.14.0.dev20231107.dist-info/RECORD → tf_models_nightly-2.14.0.dev20231109.dist-info/RECORD RENAMED
@@ -164,7 +164,7 @@ official/legacy/transformer/transformer.py,sha256=_tVlFGHEIl3HJIsXY6ukKxUEvLCOrN
 official/legacy/transformer/transformer_forward_test.py,sha256=9Vax1XOz6QduG974EjI_Cw6P7reL8BLWzQf9yLsuPYI,6070
 official/legacy/transformer/transformer_layers_test.py,sha256=ToNwLMbPq2Qj2jHk3CYpMeRxCD6OT_0hfzBPBZZJx0I,3576
 official/legacy/transformer/transformer_main.py,sha256=TmGn-clD8X2HxWBJhByNbY8SmR6fbNbJKEDBwb3FsvY,18193
-official/legacy/transformer/transformer_main_test.py,sha256=7FSAtNgrttQDbXw_luRTB8UOSKM-cSKijz2tP0o2nWg,6641
+official/legacy/transformer/transformer_main_test.py,sha256=fvktcJ4jynelavfNmxJDG7vnm0EoSusYvtW-mkDVBjY,6653
 official/legacy/transformer/transformer_test.py,sha256=JYfcQ7IuSUGao2cOlt2PoDX3jXsdq8Po0k-BXyFf-Nc,3638
 official/legacy/transformer/translate.py,sha256=iin47QkofS4HFyqYMDJ5flDNY-FUBrNMeTpfhtCnaz8,6961
 official/legacy/transformer/utils/__init__.py,sha256=1ToRMjre4mErL4Ek4_dMVxMjXNPossNXggV8fqbISao,609
@@ -247,7 +247,7 @@ official/modeling/privacy/__init__.py,sha256=1ToRMjre4mErL4Ek4_dMVxMjXNPossNXggV
 official/modeling/privacy/configs.py,sha256=CTRX7ZWvUdWAg0-dO-TSxIzbODtACMRGbrzE1oeyIpk,960
 official/modeling/privacy/configs_test.py,sha256=Eps3m0ZGumD_G9hHQ7TQXO2ppEH4CbSP1e21VIJJRCQ,1369
 official/modeling/privacy/ops.py,sha256=J5innS29_WuFFujBMBuwyWvHC1GOwtQ7wLQCW-ldCvY,2054
-official/modeling/privacy/ops_test.py,sha256=JsDB6T6Ja2FHT6fVW91i_HjG20Z7Rf5Q5246wwj2bgs,1694
+official/modeling/privacy/ops_test.py,sha256=ZX-e66pezA4q7M7OHUJvyw4zvlsWoHcnxvgffH_UDLk,1696
 official/nlp/__init__.py,sha256=1ToRMjre4mErL4Ek4_dMVxMjXNPossNXggV8fqbISao,609
 official/nlp/continuous_finetune_lib.py,sha256=E9hzs58s9f9MefJ2TPJF2yQD0Dz2a__BjdJfHBhwEws,7894
 official/nlp/continuous_finetune_lib_test.py,sha256=1Tjv2zBwLhvCUjBY7L7HNSnCW2DQbHZeda3UDgf8Q0g,3192
@@ -689,7 +689,7 @@ official/projects/teams/teams_task_test.py,sha256=Ni1LXqfqATs0-I2pjCBSbviEGIGHER
 official/projects/teams/train.py,sha256=pp3KbKVddgyOiyv7G5H31WY32hzgD7Wi3yMV6355Qj8,1028
 official/projects/triviaqa/__init__.py,sha256=1ToRMjre4mErL4Ek4_dMVxMjXNPossNXggV8fqbISao,609
 official/projects/triviaqa/dataset.py,sha256=cUc7z1W_GZxDNUlqRdSPSSmJbQGYatUQ7WZ-l10B9I8,16330
-official/projects/triviaqa/download_and_prepare.py,sha256=6eFsChzJY_f-kBWWMI9QSymwwfqFALDD9OUyqNU7UBE,2497
+official/projects/triviaqa/download_and_prepare.py,sha256=s4xpkWcHdjJHSNWi9j31siHOau8fp1FKCw_7aF4AFno,2499
 official/projects/triviaqa/evaluate.py,sha256=vTaG-9COi6pJIcE2B8T8vFRhsVBWQqDDyvWuuYwgTvQ,1560
 official/projects/triviaqa/evaluation.py,sha256=PRp_zZaGKXc7v7YLhG9Jd55lV9PPzQB_I3vVvoPa9mQ,5293
 official/projects/triviaqa/inputs.py,sha256=RBZsTJO8u0pV-mbM2HQdMiPUSbcbVLTceGY-nuFbLG4,21728
@@ -760,6 +760,10 @@ official/projects/volumetric_models/serving/semantic_segmentation_3d_test.py,sha
 official/projects/volumetric_models/tasks/__init__.py,sha256=1ToRMjre4mErL4Ek4_dMVxMjXNPossNXggV8fqbISao,609
 official/projects/volumetric_models/tasks/semantic_segmentation_3d.py,sha256=tpf9C3Bok1HuEQsCkwfVdhck7d1Yz_4KEF5QX-3C5co,12907
 official/projects/volumetric_models/tasks/semantic_segmentation_3d_test.py,sha256=hEOjpOir5NcMYwAl6LeXPncEp0JinQA8yrYnvzzlzbA,4079
+official/projects/waste_identification_ml/__init__.py,sha256=1ToRMjre4mErL4Ek4_dMVxMjXNPossNXggV8fqbISao,609
+official/projects/waste_identification_ml/data_generation/__init__.py,sha256=1ToRMjre4mErL4Ek4_dMVxMjXNPossNXggV8fqbISao,609
+official/projects/waste_identification_ml/data_generation/utils.py,sha256=85rOM15OqWlby5PAmWVB76NmvvEhwb-5a1WraE9XOKY,10239
+official/projects/waste_identification_ml/data_generation/utils_test.py,sha256=l15BZ_iBnfYPZfo-ZdTu1Jz1kyt8RQhdB5UwcLjMoEo,5340
 official/projects/yolo/__init__.py,sha256=1ToRMjre4mErL4Ek4_dMVxMjXNPossNXggV8fqbISao,609
 official/projects/yolo/train.py,sha256=xd5zcPugSTUfa-g4q2xa1XWQjvqVpb0Y966V1PSHDX0,981
 official/projects/yolo/common/__init__.py,sha256=1ToRMjre4mErL4Ek4_dMVxMjXNPossNXggV8fqbISao,609
@@ -863,7 +867,7 @@ official/recommendation/movielens.py,sha256=XiHTJFUV2C1TgtoYmnOXP7QlZifokzsom6cj
 official/recommendation/ncf_common.py,sha256=ZSNfSTABB22zMR98qNiCTD0mTX3bbgB156mTjWuqliI,12288
 official/recommendation/ncf_input_pipeline.py,sha256=oq93PYokRWO8QYro3kyUiQiNtIhwOFkt6Y56hcY-9Gk,6974
 official/recommendation/ncf_keras_main.py,sha256=40Voy__CDI8hAKBgpf1t4Ox_gaXma4meSofGU1yQzeA,19850
-official/recommendation/ncf_test.py,sha256=wvdrYBl2_ttopTqDD99cfDFWjizo6-ZxIwR2z-xT9Xg,4144
+official/recommendation/ncf_test.py,sha256=EklHNCWErt-7VVrUAQwSRuc1n9Zq92A3bHHCNna2xu8,4158
 official/recommendation/neumf_model.py,sha256=ihScBcDEAALSPe8MITVxVk9Tqjcyi5U0dZlRnHTzmBM,16950
 official/recommendation/popen_helper.py,sha256=zJYedoghCk7VkEevTpqQpNXi86EVpa3I2SFq55x4tYE,1917
 official/recommendation/stat_utils.py,sha256=8cKlTjPKEm0m2as2vU7U5ZjWwr5CyRCdGxNzvJni1JE,3076
@@ -893,7 +897,7 @@ official/utils/flags/_distribution.py,sha256=oW6lYPw4y865JnEHBMPxtTjeM2lU6KZI8Kp
 official/utils/flags/_misc.py,sha256=3A1jTj2sQjGGK4hDp5kQ1vREz-OTlaYJ22mXY72aW7M,1541
 official/utils/flags/_performance.py,sha256=mIusa796tqpZiOAnU4RmvzGsh0-1f11n4foLoC9-f3w,11576
 official/utils/flags/core.py,sha256=Tjpanan5CHAY8xsvalsOOk28UXCSy1C-Gaza4DVPbmM,4427
-official/utils/flags/flags_test.py,sha256=DicN_ySMkGCIFi8S4O2hFfRbGpKMMMa7tT0bkKSRQ98,5312
+official/utils/flags/flags_test.py,sha256=eGuudhPYmRe_YVEb5gPlu4zBYgwqsje0kUcjzatwY-Q,5318
 official/utils/misc/__init__.py,sha256=1ToRMjre4mErL4Ek4_dMVxMjXNPossNXggV8fqbISao,609
 official/utils/misc/keras_utils.py,sha256=Gsp3_bWkajf-WdJ7BDaezE1MDuxAYWX_zMd7zc-Ddk4,7793
 official/utils/misc/model_helpers.py,sha256=ADK6ar8m68S-9Rez95KbG2V7rfOY94j1iMR8vKJ45zc,3370
@@ -924,7 +928,7 @@ official/vision/data/__init__.py,sha256=1ToRMjre4mErL4Ek4_dMVxMjXNPossNXggV8fqbI
 official/vision/data/create_coco_tf_record.py,sha256=5QGsS9TBbxaPrTWAfV2NujiNzF29CCwXb_g0wNhEavY,22742
 official/vision/data/fake_feature_generator.py,sha256=vyIp45OMZuXAtaJ33wFtSSUUs2dbo6RANlWUaq9Jtio,5039
 official/vision/data/image_utils.py,sha256=nB6_kZaH4uLKsKI-RSUMu9uHaEUZzEIO1dC4rCXiU_c,3503
-official/vision/data/image_utils_test.py,sha256=GojaoHszDlqBB6sZWL2JvjXFAaVmF8hCSrIVpnd0ZeU,4071
+official/vision/data/image_utils_test.py,sha256=7CI0fbiS5UuwfJ4eUeYJMA73W_qgEXNIT3EqrSLYa0Q,4073
 official/vision/data/process_coco_few_shot_json_files.py,sha256=Xw0uak8H_-h7qoJh67uISmwLtpbGz3GBIhmJkeVRUbw,6061
 official/vision/data/tf_example_builder.py,sha256=oGpo-dGBOZ78_c13LMJ_AAlA_5EDSPMkCjmumewLZTU,20494
 official/vision/data/tf_example_builder_test.py,sha256=Iv3mZijkn6ue8I0y09c6TXUkOpdcFq99bO2wY5MtW24,27986
@@ -1026,7 +1030,7 @@ official/vision/modeling/decoders/nasfpn.py,sha256=rpfi14Y2wIeVjGkDTLnNGbdPLH27C
 official/vision/modeling/decoders/nasfpn_test.py,sha256=7X9m41vtK6hdwh9vbjgbjM-YtxRP7j_7s1yjNRN3IVc,1897
 official/vision/modeling/heads/__init__.py,sha256=R9kh3QZDCs2LFbJSMYf5C8_o8vlsIMyypik3lC8lUHQ,1088
 official/vision/modeling/heads/dense_prediction_heads.py,sha256=ToFb7UOo-bGMyft5NzmQ0NxaKeN0KTqR4fTFV-tda4M,25991
-official/vision/modeling/heads/dense_prediction_heads_test.py,sha256=GYuXzdQksuUgXVnt7itapy4ZpZEquscUorgMaqDEaM4,8266
+official/vision/modeling/heads/dense_prediction_heads_test.py,sha256=WNmneB9TVKbNPhNeVTQWGRPY7pYLgcsejpjZAb_Stp0,8270
 official/vision/modeling/heads/instance_heads.py,sha256=JNP8gJWH2ND_d4IcBOZQ4eA8kBea0epWnp31hJnoj9Y,17829
 official/vision/modeling/heads/instance_heads_test.py,sha256=S4crAnUSmm0xTqoz1uew9oMYCPM1g4vE-Sq9x-chY3U,4207
 official/vision/modeling/heads/segmentation_heads.py,sha256=kkeZwx0hRMDofHwEZDw4W-DAbbaqr-cRK_CXucr5kTc,20184
@@ -1084,7 +1088,7 @@ official/vision/serving/export_module_factory.py,sha256=L7unMBF_axsRhvUEizQxGKE4
 official/vision/serving/export_module_factory_test.py,sha256=Y2jI3BbbQZNLspaAgb3HovGZBj8oY9gjXt5EQCgKS3s,4605
 official/vision/serving/export_saved_model.py,sha256=fB_NNZEs2uExhZKKgvX0WLeDqvbl20-vy15dFZCyQ4Y,5758
 official/vision/serving/export_saved_model_lib.py,sha256=Fm6FAq6U-ODdVc2VZu-MoYulkW2TNnUF767zlBrFBv0,7675
-official/vision/serving/export_saved_model_lib_test.py,sha256=kuQPLh5s--JeNq0-oTuZXIK5BZcwaMXdR1v12NNwvu8,2409
+official/vision/serving/export_saved_model_lib_test.py,sha256=lz6EeHcTk59nuAVQ6YNHhSrqjPpw_ZMdLyKhEf8zi1c,2411
 official/vision/serving/export_saved_model_lib_v2.py,sha256=9hGuKwhTdMOX9Rq42TVdb4zuYMPsuurTDmsesEzNEyA,3710
 official/vision/serving/export_tfhub.py,sha256=ES-BDGUjhfv8u93S1RKnY-ZJw-g25NAxKAygNHpLvfI,3500
 official/vision/serving/export_tfhub_lib.py,sha256=BV3GEYaX3CJD2TWWBAaQ5jstMnhsfCDecXad-XrVji0,2880
@@ -1122,7 +1126,7 @@ official/vision/utils/object_detection/shape_utils.py,sha256=2rghJjGoDghjqtf2qcJ
 official/vision/utils/object_detection/target_assigner.py,sha256=7lg2C4rH8qZua5mPv02F-GX0V59Ub-nH7UsKQ4_vLb8,24208
 official/vision/utils/object_detection/visualization_utils.py,sha256=9NSBmQS-LS3VSnfWBIYOGGgfOINAndz3-Fo1It_8zqs,40266
 orbit/__init__.py,sha256=aQRo8zqIQ0Dw4JQReZeiB6MmuJLvvw4DbYHYti5AGys,1117
-orbit/controller.py,sha256=kcyDUegeh-gA4uAfDPbNaNgDjamfujdaKRD2NeJhupI,25092
+orbit/controller.py,sha256=iOpz5DP-pSisTjUxCsMvYP_Q2YsKwfsSvdqmLnOrJfw,25368
 orbit/controller_test.py,sha256=FABwjwpeCKbW-FhmqztfWv8NuBUjr5uG7mrmqEjf6DY,31802
 orbit/runner.py,sha256=ZKRa513ydT05Dpr6s0xfO0SuASziRoN652_iIPIVl_E,3519
 orbit/standard_runner.py,sha256=ixT1wSC8XX_w2cvhmYnd16Q4s9k5-A4gp8ZjxxZtVH4,17412
@@ -1154,9 +1158,9 @@ tensorflow_models/__init__.py,sha256=Ciz_YBke6teb6y42QyQTUBDdXJAiV7Qdu1zOoZvYiKw
 tensorflow_models/tensorflow_models_test.py,sha256=qUBLFZg7rmKkVQ3cHJVlkoid8cPqjjyc2ZiWtjQO5_o,1395
 tensorflow_models/nlp/__init__.py,sha256=3dULDpUBpDi9vljpXadq6oJrWH4y6z42Bz2d3hopYZw,807
 tensorflow_models/vision/__init__.py,sha256=4y77XkHaH8qLls3-6ta4tMp3Xj8CLbB0ihH91HsQ9z4,833
-tf_models_nightly-2.14.0.dev20231107.dist-info/AUTHORS,sha256=1dG3fXVu9jlo7bul8xuix5F5vOnczMk7_yWn4y70uw0,337
-tf_models_nightly-2.14.0.dev20231107.dist-info/LICENSE,sha256=WxeBS_DejPZQabxtfMOM_xn8qoZNJDQjrT7z2wG1I4U,11512
-tf_models_nightly-2.14.0.dev20231107.dist-info/METADATA,sha256=nO4XglbAIIbgIzvA7CNg5SeToC58F4tfGA1b4VN3dN8,1414
-tf_models_nightly-2.14.0.dev20231107.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110
-tf_models_nightly-2.14.0.dev20231107.dist-info/top_level.txt,sha256=gum2FfO5R4cvjl2-QtP-S1aNmsvIZaFFT6VFzU0f4-g,33
-tf_models_nightly-2.14.0.dev20231107.dist-info/RECORD,,
+tf_models_nightly-2.14.0.dev20231109.dist-info/AUTHORS,sha256=1dG3fXVu9jlo7bul8xuix5F5vOnczMk7_yWn4y70uw0,337
+tf_models_nightly-2.14.0.dev20231109.dist-info/LICENSE,sha256=WxeBS_DejPZQabxtfMOM_xn8qoZNJDQjrT7z2wG1I4U,11512
+tf_models_nightly-2.14.0.dev20231109.dist-info/METADATA,sha256=BWk_f3H8oifIkJqTybyk43OXHtT_uxPfyQ0_OJT5E9g,1414
+tf_models_nightly-2.14.0.dev20231109.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110
+tf_models_nightly-2.14.0.dev20231109.dist-info/top_level.txt,sha256=gum2FfO5R4cvjl2-QtP-S1aNmsvIZaFFT6VFzU0f4-g,33
+tf_models_nightly-2.14.0.dev20231109.dist-info/RECORD,,