tf-keras-nightly 2.18.0.dev2024070709__py3-none-any.whl → 2.18.0.dev2024071209__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
tf_keras/__init__.py CHANGED
@@ -27,4 +27,4 @@ from tf_keras.src.engine.sequential import Sequential
  from tf_keras.src.engine.training import Model
 
 
- __version__ = "2.18.0.dev2024070709"
+ __version__ = "2.18.0.dev2024071209"
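
The only change to this file is the version string bump. A quick way to confirm which nightly build is installed (assuming the wheel is installed in the current environment):

    import tf_keras

    # Prints the date-stamped version defined in tf_keras/__init__.py,
    # e.g. "2.18.0.dev2024071209" for the newer wheel.
    print(tf_keras.__version__)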
tf_keras/src/callbacks.py CHANGED
@@ -1903,6 +1903,23 @@ class BackupAndRestore(Callback):
  "only supports empty strategy, "
  "MirroredStrategy, MultiWorkerMirroredStrategy and TPUStrategy."
  )
+
+ # Re-initialize the optimizer.
+ if self.model.built:
+ if (
+ self.model.optimizer is not None
+ and callable(getattr(self.model.optimizer, "build", None))
+ and not getattr(self.model.optimizer, "_built", False)
+ ):
+ self.model.optimizer.build(self.model.trainable_variables)
+ else:
+ logging.warning(
+ "To use the BackupAndRestore callback, "
+ "you model must be built before you call `fit()`. "
+ f"Model {self.model} is unbuilt. You can build it "
+ "beforehand by calling it on a batch of data."
+ )
+
  self.model._training_state = worker_training_state.WorkerTrainingState(
  self.model,
  self.backup_dir,
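
The added block rebuilds the optimizer when it exposes a callable `build` method and has not been built yet, and warns if the model itself is unbuilt. A minimal sketch of satisfying that requirement by calling the model on one batch before `fit()` (the model, data shapes, and backup path are illustrative assumptions, not part of the diff):

    import numpy as np
    import tf_keras as keras

    model = keras.Sequential([keras.layers.Dense(1)])
    model.compile(optimizer="adam", loss="mse")

    x = np.random.rand(32, 8).astype("float32")
    y = np.random.rand(32, 1).astype("float32")

    # Calling the model on a batch builds its variables, so BackupAndRestore
    # can rebuild the optimizer state when training starts or resumes.
    model(x[:1])

    backup = keras.callbacks.BackupAndRestore(backup_dir="/tmp/train_backup")
    model.fit(x, y, epochs=2, callbacks=[backup])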
tf_keras/src/layers/rnn/cudnn_gru.py CHANGED
@@ -172,6 +172,10 @@ class CuDNNGRU(_CuDNNRNN):
  shape=self._vector_shape,
  )
 
+ batch_dim = tf.shape(inputs)[1]
+ max_sequence_length = tf.shape(inputs)[0]
+ sequence_lengths = tf.fill([batch_dim], max_sequence_length)
+
  args = {
  "input": inputs,
  "input_h": input_h,
@@ -179,9 +183,10 @@ class CuDNNGRU(_CuDNNRNN):
  "params": params,
  "is_training": True,
  "rnn_mode": "gru",
+ "sequence_lengths": sequence_lengths,
  }
 
- outputs, h, _, _, _ = tf.raw_ops.CudnnRNNV2(**args)
+ outputs, h, _, _, _ = tf.raw_ops.CudnnRNNV3(**args)
 
  if self.stateful or self.return_state:
  h = h[0]
tf_keras/src/layers/rnn/cudnn_lstm.py CHANGED
@@ -204,15 +204,20 @@ class CuDNNLSTM(_CuDNNRNN):
  shape=self._vector_shape,
  )
 
+ batch_dim = tf.shape(inputs)[1]
+ max_sequence_length = tf.shape(inputs)[0]
+ sequence_lengths = tf.fill([batch_dim], max_sequence_length)
+
  args = {
  "input": inputs,
  "input_h": input_h,
  "input_c": input_c,
  "params": params,
  "is_training": True,
+ "sequence_lengths": sequence_lengths,
  }
 
- outputs, h, c, _, _ = tf.raw_ops.CudnnRNNV2(**args)
+ outputs, h, c, _, _ = tf.raw_ops.CudnnRNNV3(**args)
 
  if self.stateful or self.return_state:
  h = h[0]
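
At this point in both layers the inputs are time-major, so axis 0 is the timestep dimension and axis 1 the batch dimension; every sequence is assigned the full length before the `CudnnRNNV3` call. A standalone sketch of that construction (the tensor shape is an arbitrary example):

    import tensorflow as tf

    # Time-major input: (timesteps=5, batch=3, features=4).
    inputs = tf.zeros([5, 3, 4])

    batch_dim = tf.shape(inputs)[1]
    max_sequence_length = tf.shape(inputs)[0]
    # One full-length entry per batch element: [5, 5, 5].
    sequence_lengths = tf.fill([batch_dim], max_sequence_length)
    print(sequence_lengths.numpy())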
tf_keras/src/layers/rnn/gru.py CHANGED
@@ -1034,11 +1034,13 @@ def gpu_gru(
  mask, time_major
  )
 
- if not time_major and sequence_lengths is None:
- inputs = tf.transpose(inputs, perm=(1, 0, 2))
- seq_axis, batch_axis = (0, 1)
- else:
- seq_axis, batch_axis = (0, 1) if time_major else (1, 0)
+ seq_axis, batch_axis = (0, 1) if time_major else (1, 0)
+
+ if sequence_lengths is None:
+ max_sequence_length = tf.shape(inputs)[seq_axis]
+ batch_size = tf.shape(inputs)[batch_axis]
+ sequence_lengths = tf.fill([batch_size], max_sequence_length)
+
  # For init_h, cuDNN expects one more dim of num_layers before or after batch
  # dim for time major or batch major inputs respectively
  init_h = tf.expand_dims(init_h, axis=seq_axis)
@@ -1069,49 +1071,36 @@ def gpu_gru(
  transpose_weights=True,
  )
 
- if sequence_lengths is not None:
- if go_backwards:
- # Three reversals are required. E.g.,
- # normal input = [1, 2, 3, 0, 0] # where 0 need to be masked
- # reversed_input_to_cudnn = [3, 2, 1, 0, 0]
- # output_from_cudnn = [6, 5, 4, 0, 0]
- # expected_output = [0, 0, 6, 5 ,4]
- inputs = tf.reverse_sequence(
- inputs,
- sequence_lengths,
- seq_axis=seq_axis,
- batch_axis=batch_axis,
- )
- outputs, h, _, _, _ = tf.raw_ops.CudnnRNNV3(
- input=inputs,
- input_h=init_h,
- input_c=0,
- params=params,
- is_training=True,
- rnn_mode="gru",
- sequence_lengths=sequence_lengths,
- time_major=time_major,
+ if go_backwards:
+ # Three reversals are required. E.g.,
+ # normal input = [1, 2, 3, 0, 0] # where 0 need to be masked
+ # reversed_input_to_cudnn = [3, 2, 1, 0, 0]
+ # output_from_cudnn = [6, 5, 4, 0, 0]
+ # expected_output = [0, 0, 6, 5 ,4]
+ inputs = tf.reverse_sequence(
+ inputs,
+ sequence_lengths,
+ seq_axis=seq_axis,
+ batch_axis=batch_axis,
  )
- if go_backwards:
- outputs = tf.reverse_sequence(
- outputs,
- sequence_lengths,
- seq_axis=seq_axis,
- batch_axis=batch_axis,
- )
- outputs = tf.reverse(outputs, axis=[seq_axis])
- else:
- if go_backwards:
- # Reverse axis 0 since the input is already convert to time major.
- inputs = tf.reverse(inputs, axis=[0])
- outputs, h, _, _ = tf.raw_ops.CudnnRNN(
- input=inputs,
- input_h=init_h,
- input_c=0,
- params=params,
- is_training=True,
- rnn_mode="gru",
+ outputs, h, _, _, _ = tf.raw_ops.CudnnRNNV3(
+ input=inputs,
+ input_h=init_h,
+ input_c=0,
+ params=params,
+ is_training=True,
+ rnn_mode="gru",
+ sequence_lengths=sequence_lengths,
+ time_major=time_major,
+ )
+ if go_backwards:
+ outputs = tf.reverse_sequence(
+ outputs,
+ sequence_lengths,
+ seq_axis=seq_axis,
+ batch_axis=batch_axis,
  )
+ outputs = tf.reverse(outputs, axis=[seq_axis])
 
  last_output = outputs[-1]
  if not time_major and sequence_lengths is None and return_sequences:
tf_keras/src/layers/rnn/lstm.py CHANGED
@@ -1063,11 +1063,13 @@ def gpu_lstm(
  mask, time_major
  )
 
- if not time_major and sequence_lengths is None:
- inputs = tf.transpose(inputs, perm=(1, 0, 2))
- seq_axis, batch_axis = (0, 1)
- else:
- seq_axis, batch_axis = (0, 1) if time_major else (1, 0)
+ seq_axis, batch_axis = (0, 1) if time_major else (1, 0)
+
+ if sequence_lengths is None:
+ max_sequence_length = tf.shape(inputs)[seq_axis]
+ batch_size = tf.shape(inputs)[batch_axis]
+ sequence_lengths = tf.fill([batch_size], max_sequence_length)
+
  # For init_h and init_c, cuDNN expects one more dim of num_layers before or
  # after batch dim for time major or batch major inputs respectively
  init_h = tf.expand_dims(init_h, axis=seq_axis)
@@ -1099,52 +1101,36 @@ def gpu_lstm(
  transpose_weights=True,
  )
 
- if sequence_lengths is not None:
- if go_backwards:
- # Three reversals are required. E.g.,
- # normal input = [1, 2, 3, 0, 0] # where 0 need to be masked
- # reversed_input_to_cudnn = [3, 2, 1, 0, 0]
- # output_from_cudnn = [6, 5, 4, 0, 0]
- # expected_output = [0, 0, 6, 5 ,4]
- inputs = tf.reverse_sequence(
- inputs,
- sequence_lengths,
- seq_axis=seq_axis,
- batch_axis=batch_axis,
- )
- outputs, h, c, _, _ = tf.raw_ops.CudnnRNNV3(
- input=inputs,
- input_h=init_h,
- input_c=init_c,
- params=params,
- is_training=True,
- rnn_mode="lstm",
- sequence_lengths=sequence_lengths,
- time_major=time_major,
+ if go_backwards:
+ # Three reversals are required. E.g.,
+ # normal input = [1, 2, 3, 0, 0] # where 0 need to be masked
+ # reversed_input_to_cudnn = [3, 2, 1, 0, 0]
+ # output_from_cudnn = [6, 5, 4, 0, 0]
+ # expected_output = [0, 0, 6, 5 ,4]
+ inputs = tf.reverse_sequence(
+ inputs,
+ sequence_lengths,
+ seq_axis=seq_axis,
+ batch_axis=batch_axis,
  )
- if go_backwards:
- outputs = tf.reverse_sequence(
- outputs,
- sequence_lengths,
- seq_axis=seq_axis,
- batch_axis=batch_axis,
- )
- outputs = tf.reverse(outputs, axis=[seq_axis])
- else:
- # # Fill the array with shape [batch] with value of max timesteps.
- # sequence_length = array_ops.fill([array_ops.shape(inputs)[1]],
- # array_ops.shape(inputs)[0])
- if go_backwards:
- # Reverse axis 0 since the input is already convert to time major.
- inputs = tf.reverse(inputs, axis=[0])
- outputs, h, c, _ = tf.raw_ops.CudnnRNN(
- input=inputs,
- input_h=init_h,
- input_c=init_c,
- params=params,
- is_training=True,
- rnn_mode="lstm",
+ outputs, h, c, _, _ = tf.raw_ops.CudnnRNNV3(
+ input=inputs,
+ input_h=init_h,
+ input_c=init_c,
+ params=params,
+ is_training=True,
+ rnn_mode="lstm",
+ sequence_lengths=sequence_lengths,
+ time_major=time_major,
+ )
+ if go_backwards:
+ outputs = tf.reverse_sequence(
+ outputs,
+ sequence_lengths,
+ seq_axis=seq_axis,
+ batch_axis=batch_axis,
  )
+ outputs = tf.reverse(outputs, axis=[seq_axis])
 
  last_output = outputs[-1]
  if not time_major and sequence_lengths is None and return_sequences:
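
With these hunks, `gpu_gru` and `gpu_lstm` follow a single code path: when no mask is supplied, a full-length `sequence_lengths` vector is synthesized and `CudnnRNNV3` is called with `time_major` passed through, rather than transposing batch-major inputs and falling back to `CudnnRNN`. A small sketch of the axis selection and length synthesis (the helper name and shapes are illustrative, not part of the diff):

    import tensorflow as tf

    def full_sequence_lengths(inputs, time_major):
        # Mirrors the logic added above: pick the sequence and batch axes
        # from the layout, then give every example the full length.
        seq_axis, batch_axis = (0, 1) if time_major else (1, 0)
        max_sequence_length = tf.shape(inputs)[seq_axis]
        batch_size = tf.shape(inputs)[batch_axis]
        return tf.fill([batch_size], max_sequence_length)

    # Batch-major input (batch=3, timesteps=5, features=4) -> [5, 5, 5].
    print(full_sequence_lengths(tf.zeros([3, 5, 4]), time_major=False).numpy())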
tf_keras/src/mixed_precision/autocast_variable.py CHANGED
@@ -124,20 +124,21 @@ class AutoCastVariable(tf.Variable, tf.__internal__.types.Tensor):
  def _should_cast(self):
  """Returns True if this variable should be casted when accessed."""
  autocast_dtype = getattr(_autocast_dtype, "dtype", None)
- return autocast_dtype is not None and self.dtype != autocast_dtype
+ return autocast_dtype is not None and self.true_dtype != autocast_dtype
 
  @property
  def dtype(self):
- """The dtype of the underlying variable, before any casts are done."""
- return self._variable.dtype
+ """The dtype when the value is accessed, that is after casting."""
+ return self._cast_dtype
 
  @property
  def true_dtype(self):
- """Deprecated alias of `dtype`."""
+ """The dtype of the underlying variable, before any casts are done."""
  return self._variable.dtype
 
  @property
  def _cast_dtype(self):
+ """The dtype after casting."""
  dtype = getattr(_autocast_dtype, "dtype", None)
  return dtype or self._variable.dtype
 
@@ -202,7 +203,8 @@ class AutoCastVariable(tf.Variable, tf.__internal__.types.Tensor):
  if tf.executing_eagerly() and not self._in_graph_mode:
  repr_str = (
  "<AutoCastVariable '{v.name}' shape={v.shape} "
- "dtype={v.dtype.name} dtype_to_cast_to={v._cast_dtype.name}, "
+ "dtype={v.true_dtype.name} "
+ "dtype_to_cast_to={v._cast_dtype.name}, "
  "numpy={np_repr}>"
  )
  return repr_str.format(
@@ -211,7 +213,8 @@ class AutoCastVariable(tf.Variable, tf.__internal__.types.Tensor):
  else:
  repr_str = (
  "<AutoCastVariable '{v.name}' shape={v.shape} "
- "dtype={v.dtype.name} dtype_to_cast_to={v._cast_dtype.name}>"
+ "dtype={v.true_dtype.name} "
+ "dtype_to_cast_to={v._cast_dtype.name}>"
  )
  return repr_str.format(v=self)
 
@@ -261,6 +264,9 @@ class AutoCastVariable(tf.Variable, tf.__internal__.types.Tensor):
  def _apply_assign_update(
  self, update_fn, value, use_locking=None, name=None, read_value=True
  ):
+ # In auto cast scope, we cast back to the actual variable dtype.
+ if self._should_cast():
+ value = tf.cast(value, self.true_dtype)
  # TODO(b/146181571): This logic can be simplified once
  # DistributedVariable.assign returns a DistributedVariable. Currently
  # for MirroredStrategy, it returns a Mirrored value.
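
After these hunks, `dtype` reports the dtype a read returns (the cast dtype while an autocast scope is active), `true_dtype` reports the storage dtype, and values assigned inside an autocast scope are cast back to the storage dtype first. A hedged illustration under a mixed precision policy (the layer and shapes are arbitrary; outside a forward pass the two properties still agree):

    import tf_keras as keras

    keras.mixed_precision.set_global_policy("mixed_float16")

    layer = keras.layers.Dense(4)
    layer.build((None, 8))

    # The kernel is stored in float32. `true_dtype` always reports that
    # storage dtype; `dtype` reflects the dtype seen when the value is
    # read, which becomes float16 only inside an autocast scope such as
    # the layer's forward pass.
    print(layer.kernel.true_dtype)  # float32
    print(layer.kernel.dtype)       # float32 here; float16 inside call()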
tf_keras/src/optimizers/legacy/optimizer_v2.py CHANGED
@@ -1033,6 +1033,13 @@ class OptimizerV2(tf.__internal__.tracking.Trackable):
  slot_dict = self._slots.setdefault(var_key, {})
  weight = slot_dict.get(slot_name, None)
  if weight is None:
+ # Under a mixed precision policy, variables report their "cast"
+ # dtype. However, we want to use the original dtype for slots.
+ if hasattr(var, "true_dtype"):
+ dtype = var.true_dtype
+ else:
+ dtype = var.dtype
+
  if isinstance(initializer, str) or callable(initializer):
  initializer = initializers.get(initializer)
  if isinstance(
@@ -1043,7 +1050,7 @@ class OptimizerV2(tf.__internal__.tracking.Trackable):
  else:
  slot_shape = var.shape
  initial_value = functools.partial(
- initializer, shape=slot_shape, dtype=var.dtype
+ initializer, shape=slot_shape, dtype=dtype
  )
  else:
  initial_value = initializer
@@ -1064,7 +1071,7 @@ class OptimizerV2(tf.__internal__.tracking.Trackable):
  with strategy.extended.colocate_vars_with(var):
  weight = tf.Variable(
  name=f"{var._shared_name}/{slot_name}",
- dtype=var.dtype,
+ dtype=dtype,
  trainable=False,
  initial_value=initial_value,
  )
tf_keras/src/optimizers/optimizer.py CHANGED
@@ -498,26 +498,28 @@ class _BaseOptimizer(tf.__internal__.tracking.AutoTrackable):
  Returns:
  An optimizer variable.
  """
+ # Under a mixed precision policy, variables report their "cast"
+ # dtype. However, we want to use the original dtype for slots.
+ if hasattr(model_variable, "true_dtype"):
+ dtype = model_variable.true_dtype
+ else:
+ dtype = model_variable.dtype
  if initial_value is None:
  if shape is None:
  if model_variable.shape.rank is None:
  # When the rank is None, we cannot get a concrete
  # `model_variable.shape`, we use dynamic shape.
- initial_value = tf.zeros_like(
- model_variable, dtype=model_variable.dtype
- )
+ initial_value = tf.zeros_like(model_variable, dtype=dtype)
  else:
  # We cannot always use `zeros_like`, because some cases
  # the shape exists while values don't.
- initial_value = tf.zeros(
- model_variable.shape, dtype=model_variable.dtype
- )
+ initial_value = tf.zeros(model_variable.shape, dtype=dtype)
  else:
- initial_value = tf.zeros(shape, dtype=model_variable.dtype)
+ initial_value = tf.zeros(shape, dtype=dtype)
  variable = tf.Variable(
  initial_value=initial_value,
  name=f"{variable_name}/{model_variable._shared_name}",
- dtype=model_variable.dtype,
+ dtype=dtype,
  trainable=False,
  )
  # If model_variable is a shard of a ShardedVariable, we should add a
@@ -1188,10 +1190,17 @@ class Optimizer(_BaseOptimizer):
  self._mesh, rank=initial_value.shape.rank
  ),
  )
+ # Under a mixed precision policy, variables report their "cast"
+ # dtype. However, we want to use the original dtype for optimizer
+ # variables.
+ if hasattr(model_variable, "true_dtype"):
+ dtype = model_variable.true_dtype
+ else:
+ dtype = model_variable.dtype
  variable = tf.experimental.dtensor.DVariable(
  initial_value=initial_value,
  name=f"{variable_name}/{model_variable._shared_name}",
- dtype=model_variable.dtype,
+ dtype=dtype,
  trainable=False,
  )
  self._variables.append(variable)
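
Both the legacy `OptimizerV2` slot creation and the optimizer-variable creation in `_BaseOptimizer`/`Optimizer` now resolve the dtype the same way before allocating state; a minimal sketch of that selection (the helper name is illustrative, not part of the diff):

    def _state_dtype(model_variable):
        # AutoCastVariables report their cast dtype via `dtype`; optimizer
        # state should use the underlying storage dtype instead.
        if hasattr(model_variable, "true_dtype"):
            return model_variable.true_dtype
        return model_variable.dtype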
tf_keras/src/utils/steps_per_execution_tuning.py CHANGED
@@ -229,7 +229,7 @@ class StepsPerExecutionTuner:
 
  if current_spe >= spe_limit:
  new_spe = current_spe
- elif current_spe == 0:
+ elif current_spe <= 0:
  new_spe = self.init_spe
 
  self._steps_per_execution.assign(np.round(new_spe))
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: tf_keras-nightly
- Version: 2.18.0.dev2024070709
+ Version: 2.18.0.dev2024071209
  Summary: Deep learning for humans.
  Home-page: https://keras.io/
  Download-URL: https://github.com/keras-team/tf-keras/tags
@@ -1,4 +1,4 @@
- tf_keras/__init__.py,sha256=gl2epJKFt4LGWlkUQKMjgwFJsfG7Wk3GgnLzROoKN_A,911
+ tf_keras/__init__.py,sha256=E-if5YveVJC1JqPKyqGvjAbsck6mDgdSPQ7K1IlWoVs,911
  tf_keras/__internal__/__init__.py,sha256=OHQbeIC0QtRBI7dgXaJaVbH8F00x8dCI-DvEcIfyMsE,671
  tf_keras/__internal__/backend/__init__.py,sha256=LnMs2A6685gDG79fxqmdulIYlVE_3WmXlBTBo9ZWYcw,162
  tf_keras/__internal__/layers/__init__.py,sha256=F5SGMhOTPzm-PR44VrfinURHcVeQPIEdwnZlAkSTB3A,176
@@ -210,7 +210,7 @@ tf_keras/src/__init__.py,sha256=zogCD1H65aeYRu_EMjB0bBQBXq2IAww7ys3SjEw1IKU,1502
  tf_keras/src/activations.py,sha256=QNTCdIuNGww5BPwkkjkaNZf4j09m27Nqi-r4aTBOxnk,22630
  tf_keras/src/backend.py,sha256=025YEbBOWW2J_iqO3IpPx549tsJtcHqtlQadXq4ssWs,248504
  tf_keras/src/backend_config.py,sha256=DaKkQg6jLmzR2GtgjNxwFoHuTXwVcAzx_Hx8XgAKPNs,4516
- tf_keras/src/callbacks.py,sha256=Z1yQ1Gt2QDVBIaxi0kDf1BtkWXrHQ1fUjKn7fMrIJrY,133627
+ tf_keras/src/callbacks.py,sha256=YbaWLKVl18aFSCirA6ZkoMt14UzufGzHpOyBqtewnkY,134312
  tf_keras/src/callbacks_v1.py,sha256=iT1NSRgN0Yw3joDTB3uKy4bEzb6Az6E5CTNH77wElUs,22154
  tf_keras/src/constraints.py,sha256=vxdBJEQz3rmtGSyCwou_WSCQUhaw6Wz_OgJoW1dtyIo,13949
  tf_keras/src/losses.py,sha256=dhdaZfY56-ZimHgfckyQYBKxvLDeYyLEU_mAyxM8H4g,110698
@@ -458,15 +458,15 @@ tf_keras/src/layers/rnn/cell_wrappers.py,sha256=8IBJYjollddx4DULRFfs2k-L_6wjN3dA
  tf_keras/src/layers/rnn/conv_lstm1d.py,sha256=suShze6ipNXabGlKJTxkOia17ZP4SeEei3Mi4F8lFOQ,8761
  tf_keras/src/layers/rnn/conv_lstm2d.py,sha256=myxOioB3yNn0L_-gMh0R41sb-MwTXO993lAT05_N0Zw,8874
  tf_keras/src/layers/rnn/conv_lstm3d.py,sha256=GT4OoPFtCr5xgaaqy3ezt5DyDu8Ut-wQEihCOHFk0D4,8969
- tf_keras/src/layers/rnn/cudnn_gru.py,sha256=wmOK3iNJltvj-I-qn62vS4MbJBwB3xtLSoY9V5cJbIs,8625
- tf_keras/src/layers/rnn/cudnn_lstm.py,sha256=IGM2g1CGnRnof1PKUS6TWkjTM3UgD1SVpNObylRZoJM,10114
+ tf_keras/src/layers/rnn/cudnn_gru.py,sha256=8xXHC2w7zBHJh6yIjIBaavGg336955AvPfeuK-9g5s0,8835
+ tf_keras/src/layers/rnn/cudnn_lstm.py,sha256=_Y-19bvo3_WsnBULprb6FjEFHH8kAD8wHsbLa6y3-LM,10324
  tf_keras/src/layers/rnn/dropout_rnn_cell_mixin.py,sha256=8K10_2IhX8x7edq0_aBfKEpA3O991dJXvr3dKs7EZfw,7610
- tf_keras/src/layers/rnn/gru.py,sha256=tHBjJkI4KEQEP6LwlySmGmfLmxz7ZDRFDK2bwjQkI0A,50746
+ tf_keras/src/layers/rnn/gru.py,sha256=_TT51hEnf3zLXcC-Nh9I4TXWGBoRBv4bYCXUTNsM59Y,50259
  tf_keras/src/layers/rnn/gru_lstm_utils.py,sha256=mEs-0Tk2iphhC7FClhLTk0QBfxSNYpbrVHpXxQ2ebL4,9922
  tf_keras/src/layers/rnn/gru_v1.py,sha256=9fuHFObkrvH5euyefVaupxJ15ctRgaHLFuZIbKBbgWY,15761
  tf_keras/src/layers/rnn/legacy_cell_wrappers.py,sha256=84oVtywCAxv2rALypz9_PFuBtZAXlWlAIFRg6yOKFBw,25382
  tf_keras/src/layers/rnn/legacy_cells.py,sha256=YB59DHuZwVQa9CikUNAHug0GgGocnixP9l4-ixf7Hhc,52845
- tf_keras/src/layers/rnn/lstm.py,sha256=WYq-Ih-L_1S1qMSvqOsH4tJ9auk9SiIy3Y-LnMISkR4,53482
+ tf_keras/src/layers/rnn/lstm.py,sha256=X9Tkqg-b0FNtR_ehQpmFTbWhjP18wpngoGnBrwXUo4w,52770
  tf_keras/src/layers/rnn/lstm_v1.py,sha256=6Ot8lHDlKeZsTxuvob4pSlnEljB2vAg1HoerYYQJtmo,15790
  tf_keras/src/layers/rnn/rnn_utils.py,sha256=0LiMi0efBFIi4RNPxpjTV6TjIn69WxbUSGpm1JVMJQM,8180
  tf_keras/src/layers/rnn/simple_rnn.py,sha256=JAxrvRhxMvE1qt5sh-Mc728o71QgJIuM4wpADY8yY6I,19937
@@ -491,7 +491,7 @@ tf_keras/src/metrics/probabilistic_metrics.py,sha256=sX8I4r5UXNz3eNBlq6-JNkuNoth
  tf_keras/src/metrics/py_metric.py,sha256=jmyBid791Q8N7tOYTO9byffjH2rJ8YnppoUmEbKuATU,7236
  tf_keras/src/metrics/regression_metrics.py,sha256=ZYxCCs6qJERmDXmaH5IIw2_lt5zI5aCsAzLaX4732Ko,21098
  tf_keras/src/mixed_precision/__init__.py,sha256=40TWeUS803bREZotAqMgJKjywtbGtd-pToJ2rNU_yiE,1110
- tf_keras/src/mixed_precision/autocast_variable.py,sha256=rTmf66jdaujhm1KqbXS-L4SpZ_l-3Oy-rmxdBrPSTuk,22394
+ tf_keras/src/mixed_precision/autocast_variable.py,sha256=1Rz_cG1qtInnbCtKRJtP59YFaBeFTcleUH4ZkaDFPds,22671
  tf_keras/src/mixed_precision/device_compatibility_check.py,sha256=oSVZwizUlPMTnhJxr7zgWxX2v8jHOHhyZCwDCo8aYK0,6252
  tf_keras/src/mixed_precision/loss_scale_optimizer.py,sha256=A_WXEJc0XCAWFsy55f6EWmDORRSX4LoeGcHrC-upHvo,64122
  tf_keras/src/mixed_precision/policy.py,sha256=1GWHp99dU0f6D0h_jIrSQkoLyIf0ClRJ0BbwHqIYiCg,22734
@@ -510,7 +510,7 @@ tf_keras/src/optimizers/ftrl.py,sha256=y4XvrHBzneuEfkK_VaSL0-93GR0hE88NRdDD_T3MD
  tf_keras/src/optimizers/legacy_learning_rate_decay.py,sha256=IK_MrwKSn9t1jbW87_-NL83y8svcFX033xzs0DXT9Zs,30051
  tf_keras/src/optimizers/lion.py,sha256=v7xfzhQafuW4gtNH4X0bTM37yv-XRTPABl_Wt-wYah4,6157
  tf_keras/src/optimizers/nadam.py,sha256=6czfMnkOS_-wwG51lhMNcBa_DCuIAx2esEk6hKzQINM,7452
- tf_keras/src/optimizers/optimizer.py,sha256=NvVezMvdcM-pujsRteG9PILqrZeJwSw7XQIKk7fZPk4,61536
+ tf_keras/src/optimizers/optimizer.py,sha256=V9G4BbTsGkCQe3ZoDjn85ONf97Ggd4o0uwNeIvvgCzw,62009
  tf_keras/src/optimizers/optimizer_v1.py,sha256=S6TPXqe53wL_cPDoMJ4pA7sWRCE5s42k1hb4-oClNQg,32631
  tf_keras/src/optimizers/rmsprop.py,sha256=b3LAWsxSAny7xXHsFFo8T0aLYSjg_X4R4okxciVC2NA,7890
  tf_keras/src/optimizers/sgd.py,sha256=cIfImtdzvaAgH4unxJXo6txFIs7h_1rBk8i1AAtNCTA,6763
@@ -523,7 +523,7 @@ tf_keras/src/optimizers/legacy/adamax.py,sha256=tTI5ajxqnZBmcFuspSsaqytxDTIuoBC8
  tf_keras/src/optimizers/legacy/ftrl.py,sha256=nRFnSHmTUIgV7apkaVWR4XR1hF3YnqCJxvNfRl6NSEo,12644
  tf_keras/src/optimizers/legacy/gradient_descent.py,sha256=ueMcfrSWc3j-g8kne_eFHvLOgEa582VM1DDj9Lv0-Ys,7965
  tf_keras/src/optimizers/legacy/nadam.py,sha256=IQxti8ZTWgaavP2z8ydRaizFfeaN2RXRqcYhqGb9opw,9998
- tf_keras/src/optimizers/legacy/optimizer_v2.py,sha256=sG9tBa_CChLUFihhZ6FDnERtovv_oeIhfmKK9yvhdj4,69599
+ tf_keras/src/optimizers/legacy/optimizer_v2.py,sha256=SR7Hdhi6Vk6kPSm7LrETnjVouM-PvEfarJBjxFcioMk,69877
  tf_keras/src/optimizers/legacy/rmsprop.py,sha256=jGoPiNA4xfnE4SOuZNTBj2-Do0l81xShd1mfdsylX4Q,14732
  tf_keras/src/optimizers/schedules/__init__.py,sha256=otlrYjzO1uYlfR2PE124yoEANdnUhsrgtX1ILp2Ahbc,1101
  tf_keras/src/optimizers/schedules/learning_rate_schedule.py,sha256=7KKPbqIZpJPQeiLduuFoDcGSYAFKB-xH2lL9iHLOY9I,48113
@@ -592,7 +592,7 @@ tf_keras/src/utils/mode_keys.py,sha256=_QYq58qr_b-RhvMYBYnL47NkC0G1ng8NYcVnS_IYi
  tf_keras/src/utils/np_utils.py,sha256=4EZ58G1zThQfQEmMNBPnUYRszXRJoY4foxYhOGfS89s,4805
  tf_keras/src/utils/object_identity.py,sha256=HZEETVcCoBrnIFjnxmBhZaCKP9xQMv9rMr_ihlMveVs,6879
  tf_keras/src/utils/sidecar_evaluator.py,sha256=uFgLlX-Qwd1Dg9ACEdY4I_a-A4qucDu_df69XAKrSqw,18474
- tf_keras/src/utils/steps_per_execution_tuning.py,sha256=i1TBoaKxsWJz3o3BBYYhZJwj6_mEOI3Ul01zJyIZys8,9586
+ tf_keras/src/utils/steps_per_execution_tuning.py,sha256=jBDh5Xi1X17kJ8Uz0bK_qU_ZSzKUHWt1KTnDXGYJUbA,9586
  tf_keras/src/utils/text_dataset.py,sha256=HcGKN607b4L4fdNmPOHkN8wbEF6BQ3Uq8CPF6Zz26uI,11084
  tf_keras/src/utils/tf_contextlib.py,sha256=ysTHicWjRBEVGNC6cKSCO7GTX1DxGNX9Z0vi4j9Q6Z8,1300
  tf_keras/src/utils/tf_inspect.py,sha256=hRMwGwU15gqC8JPhFJemU6Aa5J99Z1gerHT9u93AkKI,14237
@@ -606,7 +606,7 @@ tf_keras/src/utils/legacy/__init__.py,sha256=EfMmeHYDzwvxNaktPhQbkTdcPSIGCqMhBND
  tf_keras/utils/__init__.py,sha256=b7_d-USe_EmLo02_P99Q1rUCzKBYayPCfiYFStP-0nw,2735
  tf_keras/utils/experimental/__init__.py,sha256=DzGogE2AosjxOVILQBT8PDDcqbWTc0wWnZRobCdpcec,97
  tf_keras/utils/legacy/__init__.py,sha256=7ujlDa5HeSRcth2NdqA0S1P2-VZF1kB3n68jye6Dj-8,189
- tf_keras_nightly-2.18.0.dev2024070709.dist-info/METADATA,sha256=EswCWArBtz0HN56YW4lOXMOKmHkYcky9H3RD0gwYYWU,1638
- tf_keras_nightly-2.18.0.dev2024070709.dist-info/WHEEL,sha256=y4mX-SOX4fYIkonsAGA5N0Oy-8_gI4FXw5HNI1xqvWg,91
- tf_keras_nightly-2.18.0.dev2024070709.dist-info/top_level.txt,sha256=LC8FK7zHDNKxB17C6lGKvrZ_fZZGJsRiBK23SfiDegY,9
- tf_keras_nightly-2.18.0.dev2024070709.dist-info/RECORD,,
+ tf_keras_nightly-2.18.0.dev2024071209.dist-info/METADATA,sha256=QC-_bSWH78WeKH66AEHswezITPLAmc8RXoIPd37_CKs,1638
+ tf_keras_nightly-2.18.0.dev2024071209.dist-info/WHEEL,sha256=Z4pYXqR_rTB7OWNDYFOm1qRk0RX6GFP2o8LgvP453Hk,91
+ tf_keras_nightly-2.18.0.dev2024071209.dist-info/top_level.txt,sha256=LC8FK7zHDNKxB17C6lGKvrZ_fZZGJsRiBK23SfiDegY,9
+ tf_keras_nightly-2.18.0.dev2024071209.dist-info/RECORD,,
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (70.2.0)
+ Generator: setuptools (70.3.0)
  Root-Is-Purelib: true
  Tag: py3-none-any
 