tf-models-nightly 2.12.0.dev20230501__py2.py3-none-any.whl → 2.12.0.dev20230502__py2.py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- official/nlp/modeling/layers/tn_expand_condense_test.py +1 -16
- official/projects/yolo/serving/export_module_factory.py +12 -5
- official/projects/yolo/serving/export_saved_model.py +1 -2
- official/vision/utils/object_detection/visualization_utils.py +1 -1
- {tf_models_nightly-2.12.0.dev20230501.dist-info → tf_models_nightly-2.12.0.dev20230502.dist-info}/METADATA +1 -1
- {tf_models_nightly-2.12.0.dev20230501.dist-info → tf_models_nightly-2.12.0.dev20230502.dist-info}/RECORD +10 -10
- {tf_models_nightly-2.12.0.dev20230501.dist-info → tf_models_nightly-2.12.0.dev20230502.dist-info}/AUTHORS +0 -0
- {tf_models_nightly-2.12.0.dev20230501.dist-info → tf_models_nightly-2.12.0.dev20230502.dist-info}/LICENSE +0 -0
- {tf_models_nightly-2.12.0.dev20230501.dist-info → tf_models_nightly-2.12.0.dev20230502.dist-info}/WHEEL +0 -0
- {tf_models_nightly-2.12.0.dev20230501.dist-info → tf_models_nightly-2.12.0.dev20230502.dist-info}/top_level.txt +0 -0
official/nlp/modeling/layers/tn_expand_condense_test.py +1 -16

@@ -27,7 +27,7 @@ class TNLayerTest(tf.test.TestCase, parameterized.TestCase):
   """
 
   def setUp(self):
-    super(
+    super().setUp()
     self.labels = np.concatenate((np.ones((50, 1)), np.zeros((50, 1))), axis=0)
 
   def _build_model(self, data, proj_multiple=2):
@@ -41,21 +41,6 @@ class TNLayerTest(tf.test.TestCase, parameterized.TestCase):
     model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
     return model
 
-  @parameterized.parameters((768, 6), (1024, 2))
-  def test_keras_layer(self, input_dim, proj_multiple):
-    data = np.random.normal(size=(100, input_dim))
-    data = data.astype(np.float32)
-    tf.keras.__internal__.utils.layer_test(
-        TNExpandCondense,
-        kwargs={
-            'proj_multiplier': proj_multiple,
-            'input_shape': data.shape
-        },
-        input_shape=data.shape,
-        input_data=data,
-        expected_output_shape=(None, data.shape[-1]),
-        expected_output_dtype=data.dtype)
-
   @parameterized.parameters((768, 6), (1024, 2))
   def test_train(self, input_dim, proj_multiple):
     tf.keras.utils.set_random_seed(0)
official/projects/yolo/serving/export_module_factory.py +12 -5

@@ -21,6 +21,7 @@ import tensorflow as tf
 from official.core import config_definitions as cfg
 from official.core import export_base
 from official.projects.yolo.configs.yolo import YoloTask
+from official.projects.yolo.configs.yolov7 import YoloV7Task
 from official.projects.yolo.modeling import factory as yolo_factory
 from official.projects.yolo.modeling.backbones import darknet  # pylint: disable=unused-import
 from official.projects.yolo.modeling.decoders import yolo_decoder  # pylint: disable=unused-import
@@ -163,10 +164,16 @@ def create_yolo_export_module(
       input_type, batch_size, input_image_size, num_channels, input_name)
   input_specs = tf.keras.layers.InputSpec(shape=[batch_size] +
                                           input_image_size + [num_channels])
-
-
-
-
+  if isinstance(params.task, YoloTask):
+    model, _ = yolo_factory.build_yolo(
+        input_specs=input_specs,
+        model_config=params.task.model,
+        l2_regularization=None)
+  elif isinstance(params.task, YoloV7Task):
+    model = yolo_factory.build_yolov7(
+        input_specs=input_specs,
+        model_config=params.task.model,
+        l2_regularization=None)
 
   def preprocess_fn(inputs):
     image_tensor = export_utils.parse_image(inputs, input_type,
@@ -247,7 +254,7 @@ def get_export_module(params: cfg.ExperimentConfig,
                                               input_image_size,
                                               num_channels,
                                               input_name)
-  elif isinstance(params.task, YoloTask):
+  elif isinstance(params.task, (YoloTask, YoloV7Task)):
    export_module = create_yolo_export_module(params, input_type, batch_size,
                                              input_image_size, num_channels,
                                              input_name)
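With this change the same export entry point now dispatches on the task config type: YoloTask goes through yolo_factory.build_yolo and YoloV7Task through yolo_factory.build_yolov7. A minimal usage sketch follows; the experiment name 'yolov7_coco', the keyword-argument form of the get_export_module call, and the input sizes are assumptions for illustration only, with the argument names taken from the call visible in this hunk:

    # Hedged sketch: build an export module for a YOLO or YOLOv7 experiment.
    # 'yolov7_coco' is a hypothetical experiment name; keyword names mirror the
    # arguments passed to create_yolo_export_module in the diff above, not a
    # confirmed signature of get_export_module.
    from official.core import exp_factory
    from official.projects.yolo.common import registry_imports  # pylint: disable=unused-import
    from official.projects.yolo.serving import export_module_factory

    params = exp_factory.get_exp_config('yolov7_coco')  # hypothetical name
    export_module = export_module_factory.get_export_module(
        params,
        input_type='image_tensor',   # assumed input type
        batch_size=1,
        input_image_size=[640, 640],  # assumed serving resolution
        num_channels=3,
        input_name='inputs')          # assumed input name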
official/projects/yolo/serving/export_saved_model.py +1 -2

@@ -38,9 +38,8 @@ from absl import flags
 
 from official.core import exp_factory
 from official.modeling import hyperparams
-from official.projects.yolo.
+from official.projects.yolo.common import registry_imports  # pylint: disable=unused-import
 from official.projects.yolo.serving import export_module_factory
-from official.projects.yolo.tasks import yolo as task  # pylint: disable=unused-import
 from official.vision.serving import export_saved_model_lib
 
 FLAGS = flags.FLAGS
official/vision/utils/object_detection/visualization_utils.py +1 -1

@@ -878,7 +878,7 @@ def update_detection_state(step_outputs=None) -> Dict[str, Any]:
   else:
     detection_keypoints = None
 
-  if detection_keypoints:
+  if detection_keypoints is not None:
     state['detection_keypoints'] = tf.concat(detection_keypoints, axis=0)
 
   detection_masks = step_outputs['visualization'][1].get(
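The one-line change above matters because detection_keypoints is either None or an array-like batch of keypoints, and the truth value of an array-like is ambiguous. A small stand-alone illustration, using a numpy array as a stand-in for the value pulled from step_outputs (the real value may be a tensor or a collection of tensors, which can behave the same way or be disallowed outright in graph code):

    import numpy as np

    # Stand-in for a batch of detection keypoints; the real value comes from
    # step_outputs and is only None when no keypoints were produced.
    detection_keypoints = np.zeros((2, 17, 2), dtype=np.float32)

    try:
      if detection_keypoints:  # ambiguous for a multi-element array
        pass
    except ValueError as err:
      print(err)  # "The truth value of an array with more than one element is ambiguous..."

    if detection_keypoints is not None:  # only skips the genuine "no keypoints" case
      print(detection_keypoints.shape)   # (2, 17, 2)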
{tf_models_nightly-2.12.0.dev20230501.dist-info → tf_models_nightly-2.12.0.dev20230502.dist-info}/RECORD +10 -10

@@ -355,7 +355,7 @@ official/nlp/modeling/layers/talking_heads_attention_test.py,sha256=oqgiQPsNLpck
 official/nlp/modeling/layers/text_layers.py,sha256=soVaf1cqb7s25Fk0i01wCJE97sscY-PFfVNjpcuJjhs,32516
 official/nlp/modeling/layers/text_layers_test.py,sha256=0aP-lzjnuWrn26HuWncnj7ASIEMFRVrR6W_Mwob3WdE,24245
 official/nlp/modeling/layers/tn_expand_condense.py,sha256=yzCgGwXfTC0Mumn8HmbczZ1pG9JYkpIE2LLb702ADUU,6703
-official/nlp/modeling/layers/tn_expand_condense_test.py,sha256=
+official/nlp/modeling/layers/tn_expand_condense_test.py,sha256=LXu-Of1ck-SeTCfUo9gGfL_CFTxPLVpJTQw8NtCX0TU,5890
 official/nlp/modeling/layers/tn_transformer_expand_condense.py,sha256=RK5vryB7W8cU58_BWzfee2id1RB6BT2NVhP9FlYkXYI,11022
 official/nlp/modeling/layers/tn_transformer_test.py,sha256=frCu6LKapVWXw0YLs8_7KMp5qxEOHNBy7yJIZiE17qc,8883
 official/nlp/modeling/layers/transformer.py,sha256=1URI5Z3ZV6DQ2oTgdJIc6EXbgutSlDZ7l-gYoV1HCcE,19414
@@ -761,8 +761,8 @@ official/projects/yolo/optimization/configs/__init__.py,sha256=1ToRMjre4mErL4Ek4
 official/projects/yolo/optimization/configs/optimization_config.py,sha256=NmxuFW64_MYFcBF_SxN-34rkQ7PxAswpxpufBL2GIPA,1998
 official/projects/yolo/optimization/configs/optimizer_config.py,sha256=0YZgwyEQKfEwP44x9wzZ7N01I0bdawqEOgWmezora0Y,2283
 official/projects/yolo/serving/__init__.py,sha256=1ToRMjre4mErL4Ek4_dMVxMjXNPossNXggV8fqbISao,609
-official/projects/yolo/serving/export_module_factory.py,sha256=
-official/projects/yolo/serving/export_saved_model.py,sha256=
+official/projects/yolo/serving/export_module_factory.py,sha256=tRtJbbaLxV1BLm3OTTkyzkkyRrGxOTJdcg4gCuInW6k,9910
+official/projects/yolo/serving/export_saved_model.py,sha256=gOxopjUl4Axp_Z1ArE_QeUW2qM0JiX71I040lQOKDrY,4285
 official/projects/yolo/serving/export_tflite.py,sha256=Pt1rkKu9TxVYB8HtLadJQ_buWzNXqWL8Zp0TFDPUXzw,902
 official/projects/yolo/serving/model_fn.py,sha256=J1i-UPX1TuPqPaBWu2HyhtVHthq6VRbsnR98Sjrs0iY,3008
 official/projects/yolo/tasks/__init__.py,sha256=1ToRMjre4mErL4Ek4_dMVxMjXNPossNXggV8fqbISao,609
@@ -1049,7 +1049,7 @@ official/vision/utils/object_detection/preprocessor.py,sha256=Fklxck111TQEKsrWSM
 official/vision/utils/object_detection/region_similarity_calculator.py,sha256=yqQLLRT80IdAu3K_fILli_u1aL37lv0FpDtFcyRrPzs,4544
 official/vision/utils/object_detection/shape_utils.py,sha256=p3Q7e9gTTQNv1gnMAkuTkfXc6DYVB8mjk_Vjlq7bRlg,3608
 official/vision/utils/object_detection/target_assigner.py,sha256=fTkjedzhp_-RTUGR27tWbmnnLpW7F3lVCkZlr7Nv_9o,24198
-official/vision/utils/object_detection/visualization_utils.py,sha256=
+official/vision/utils/object_detection/visualization_utils.py,sha256=ldXEkcW0O9Tdm-0HrRhtCi-gm7bho--2ky96DqfVH_o,34527
 orbit/__init__.py,sha256=aQRo8zqIQ0Dw4JQReZeiB6MmuJLvvw4DbYHYti5AGys,1117
 orbit/controller.py,sha256=RFcHfK4ZfgnscJC1uAlHav5SkehM2sFOeatyIpNnfTo,23667
 orbit/controller_test.py,sha256=sx1ok0Epn9gKMtN2bWCavAtIe5Q4WD_hLN2HqD-lsxo,29924
@@ -1083,9 +1083,9 @@ tensorflow_models/__init__.py,sha256=Ciz_YBke6teb6y42QyQTUBDdXJAiV7Qdu1zOoZvYiKw
 tensorflow_models/tensorflow_models_test.py,sha256=Kz2y4V-rtBhZFFfKD2soCq52hviSfJVV1L2ztqS-9oM,1385
 tensorflow_models/nlp/__init__.py,sha256=3dULDpUBpDi9vljpXadq6oJrWH4y6z42Bz2d3hopYZw,807
 tensorflow_models/vision/__init__.py,sha256=4y77XkHaH8qLls3-6ta4tMp3Xj8CLbB0ihH91HsQ9z4,833
-tf_models_nightly-2.12.0.
-tf_models_nightly-2.12.0.
-tf_models_nightly-2.12.0.
-tf_models_nightly-2.12.0.
-tf_models_nightly-2.12.0.
-tf_models_nightly-2.12.0.
+tf_models_nightly-2.12.0.dev20230502.dist-info/AUTHORS,sha256=1dG3fXVu9jlo7bul8xuix5F5vOnczMk7_yWn4y70uw0,337
+tf_models_nightly-2.12.0.dev20230502.dist-info/LICENSE,sha256=WxeBS_DejPZQabxtfMOM_xn8qoZNJDQjrT7z2wG1I4U,11512
+tf_models_nightly-2.12.0.dev20230502.dist-info/METADATA,sha256=XiV5z3VirmxIf1tN16rxMxsHSF5dNi941h2yEcWKaYo,1393
+tf_models_nightly-2.12.0.dev20230502.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110
+tf_models_nightly-2.12.0.dev20230502.dist-info/top_level.txt,sha256=gum2FfO5R4cvjl2-QtP-S1aNmsvIZaFFT6VFzU0f4-g,33
+tf_models_nightly-2.12.0.dev20230502.dist-info/RECORD,,
{tf_models_nightly-2.12.0.dev20230501.dist-info → tf_models_nightly-2.12.0.dev20230502.dist-info}/AUTHORS
File without changes

{tf_models_nightly-2.12.0.dev20230501.dist-info → tf_models_nightly-2.12.0.dev20230502.dist-info}/LICENSE
File without changes

{tf_models_nightly-2.12.0.dev20230501.dist-info → tf_models_nightly-2.12.0.dev20230502.dist-info}/WHEEL
File without changes

{tf_models_nightly-2.12.0.dev20230501.dist-info → tf_models_nightly-2.12.0.dev20230502.dist-info}/top_level.txt
File without changes