keras-nightly 3.14.0.dev2026012304 → 3.14.0.dev2026012404 (py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
keras/_tf_keras/keras/ops/__init__.py CHANGED
@@ -245,6 +245,7 @@ from keras.src.ops.numpy import mod as mod
  from keras.src.ops.numpy import moveaxis as moveaxis
  from keras.src.ops.numpy import multiply as multiply
  from keras.src.ops.numpy import nan_to_num as nan_to_num
+ from keras.src.ops.numpy import nanmax as nanmax
  from keras.src.ops.numpy import nanmin as nanmin
  from keras.src.ops.numpy import nansum as nansum
  from keras.src.ops.numpy import ndim as ndim
keras/_tf_keras/keras/ops/numpy/__init__.py CHANGED
@@ -129,6 +129,7 @@ from keras.src.ops.numpy import mod as mod
  from keras.src.ops.numpy import moveaxis as moveaxis
  from keras.src.ops.numpy import multiply as multiply
  from keras.src.ops.numpy import nan_to_num as nan_to_num
+ from keras.src.ops.numpy import nanmax as nanmax
  from keras.src.ops.numpy import nanmin as nanmin
  from keras.src.ops.numpy import nansum as nansum
  from keras.src.ops.numpy import ndim as ndim
keras/ops/__init__.py CHANGED
@@ -245,6 +245,7 @@ from keras.src.ops.numpy import mod as mod
  from keras.src.ops.numpy import moveaxis as moveaxis
  from keras.src.ops.numpy import multiply as multiply
  from keras.src.ops.numpy import nan_to_num as nan_to_num
+ from keras.src.ops.numpy import nanmax as nanmax
  from keras.src.ops.numpy import nanmin as nanmin
  from keras.src.ops.numpy import nansum as nansum
  from keras.src.ops.numpy import ndim as ndim
keras/ops/numpy/__init__.py CHANGED
@@ -129,6 +129,7 @@ from keras.src.ops.numpy import mod as mod
  from keras.src.ops.numpy import moveaxis as moveaxis
  from keras.src.ops.numpy import multiply as multiply
  from keras.src.ops.numpy import nan_to_num as nan_to_num
+ from keras.src.ops.numpy import nanmax as nanmax
  from keras.src.ops.numpy import nanmin as nanmin
  from keras.src.ops.numpy import nansum as nansum
  from keras.src.ops.numpy import ndim as ndim
keras/src/backend/jax/numpy.py CHANGED
@@ -1013,6 +1013,11 @@ def moveaxis(x, source, destination):
      return jnp.moveaxis(x, source=source, destination=destination)


+ def nanmax(x, axis=None, keepdims=False):
+     x = convert_to_tensor(x)
+     return jnp.nanmax(x, axis=axis, keepdims=keepdims)
+
+
  def nanmin(x, axis=None, keepdims=False):
      x = convert_to_tensor(x)
      return jnp.nanmin(x, axis=axis, keepdims=keepdims)
keras/src/backend/numpy/numpy.py CHANGED
@@ -960,6 +960,10 @@ def moveaxis(x, source, destination):
      return np.moveaxis(x, source=source, destination=destination)


+ def nanmax(x, axis=None, keepdims=False):
+     return np.nanmax(x, axis=axis, keepdims=keepdims)
+
+
  def nanmin(x, axis=None, keepdims=False):
      return np.nanmin(x, axis=axis, keepdims=keepdims)

keras/src/backend/openvino/numpy.py CHANGED
@@ -2107,6 +2107,10 @@ def moveaxis(x, source, destination):
      return OpenVINOKerasTensor(ov_opset.transpose(x, axes_const).output(0))


+ def nanmax(x, axis=None, keepdims=False):
+     raise NotImplementedError("`nanmax` is not supported with openvino backend")
+
+
  def nanmin(x, axis=None, keepdims=False):
      raise NotImplementedError("`nanmin` is not supported with openvino backend")

keras/src/backend/tensorflow/numpy.py CHANGED
@@ -2125,6 +2125,26 @@ def moveaxis(x, source, destination):
      return tf.transpose(x, perm)


+ def nanmax(x, axis=None, keepdims=False):
+     x = convert_to_tensor(x)
+
+     if not x.dtype.is_floating:
+         dtype = standardize_dtype(x.dtype)
+         if dtype == "bool":
+             return tf.reduce_any(x, axis=axis, keepdims=keepdims)
+         return tf.reduce_max(x, axis=axis, keepdims=keepdims)
+
+     x_clean = tf.where(
+         tf.math.is_nan(x), tf.constant(float("-inf"), dtype=x.dtype), x
+     )
+
+     return tf.where(
+         tf.reduce_all(tf.math.is_nan(x), axis=axis, keepdims=keepdims),
+         tf.constant(float("nan"), dtype=x.dtype),
+         tf.reduce_max(x_clean, axis=axis, keepdims=keepdims),
+     )
+
+
  def nanmin(x, axis=None, keepdims=False):
      x = convert_to_tensor(x)

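For reference, the TensorFlow implementation above mirrors NumPy's nanmax semantics: NaNs are masked out with -inf so they cannot win the reduction, and a slice that is entirely NaN still reduces to NaN. A standalone NumPy sketch of the same two-step reduction (illustrative only, not part of the package):

import numpy as np

x = np.array([[1.0, np.nan, 3.0],
              [np.nan, np.nan, np.nan]])

# Step 1: mask NaNs with -inf so they can never win the max reduction.
x_clean = np.where(np.isnan(x), -np.inf, x)
row_max = x_clean.max(axis=1)                # [ 3., -inf]

# Step 2: an all-NaN slice must still report NaN rather than -inf.
all_nan = np.isnan(x).all(axis=1)            # [False, True]
result = np.where(all_nan, np.nan, row_max)  # [ 3., nan]

print(result)                # [ 3. nan]
print(np.nanmax(x, axis=1))  # same values (NumPy also warns on the all-NaN row)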
keras/src/backend/torch/core.py CHANGED
@@ -1,6 +1,7 @@
  import builtins
  import contextlib
  import functools
+ import os

  import ml_dtypes
  import numpy as np
@@ -26,7 +27,9 @@ IS_THREAD_SAFE = True
  # Some operators such as 'aten::_foreach_mul_.Scalar'
  # are not currently implemented for the MPS device.
  # check https://github.com/pytorch/pytorch/issues/77764.
- if torch.backends.mps.is_available():
+ if "KERAS_TORCH_DEVICE" in os.environ:
+     DEFAULT_DEVICE = os.environ["KERAS_TORCH_DEVICE"]
+ elif torch.backends.mps.is_available():
      DEFAULT_DEVICE = "mps"
  elif torch.cuda.is_available():
      DEFAULT_DEVICE = "cuda"
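An illustrative sketch of the new device override; the KERAS_TORCH_DEVICE variable name comes from the diff above, while the rest of the snippet is an assumed typical setup. The variable must be set before Keras is imported so the module-level device detection in torch/core.py sees it:

import os

os.environ["KERAS_BACKEND"] = "torch"
os.environ["KERAS_TORCH_DEVICE"] = "cpu"  # overrides the mps/cuda auto-detection

import keras

x = keras.ops.ones((2, 3))  # created on the device selected above
print(x.device)             # torch tensors report their device, e.g. cpu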
keras/src/backend/torch/nn.py CHANGED
@@ -1131,10 +1131,6 @@ def dot_product_attention(
      flash_attention=None,
      attn_logits_soft_cap=None,
  ):
-     if bias is not None:
-         raise ValueError(
-             "torch's `dot_product_attention` doesn't support `bias`."
-         )
      query = convert_to_tensor(query)
      key = convert_to_tensor(key)
      value = convert_to_tensor(value)
@@ -1144,6 +1140,10 @@ def dot_product_attention(
              f"Received: query.shape={query.shape}, key.shape={key.shape}, "
              f"value.shape={value.shape}."
          )
+     if bias is not None and mask is not None:
+         raise ValueError(
+             "Only one of `bias` and `mask` can be provided. Received both."
+         )
      compute_dtype = backend.result_type(query.dtype, key.dtype, value.dtype)
      query = cast(query, compute_dtype)
      key = cast(key, compute_dtype)
@@ -1154,6 +1154,9 @@ def dot_product_attention(
          # Explicit set `is_causal` to `False` when `mask` is not `None`.
          is_causal = False
          mask = torch.where(mask, 0.0, _get_large_negative(query.dtype))
+     if bias is not None:
+         bias = convert_to_tensor(bias, dtype=compute_dtype)
+         mask = bias  # Use `bias` as `mask` for scaled_dot_product_attention.

      axis0, axis1 = 1, 2
      query = torch.transpose(query, axis0, axis1)
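With the change above, the torch backend now forwards `bias` as the additive attention mask instead of rejecting it, and passing both `bias` and `mask` raises an error. A hedged usage sketch; the shapes are assumptions consistent with the 4D check in this function:

import numpy as np
from keras import ops

# Assumed layout: (batch, seq_len, num_heads, head_dim).
batch, seq, heads, dim = 2, 4, 2, 8
query = np.random.rand(batch, seq, heads, dim).astype("float32")
key = np.random.rand(batch, seq, heads, dim).astype("float32")
value = np.random.rand(batch, seq, heads, dim).astype("float32")

# Additive attention bias, broadcastable to (batch, heads, q_len, k_len).
bias = np.zeros((1, 1, seq, seq), dtype="float32")
bias[..., 0] = -1e9  # effectively block attention to position 0

out = ops.dot_product_attention(query, key, value, bias=bias)
print(out.shape)  # (2, 4, 2, 8)

# Per the new check, the torch backend rejects passing both at once:
# ops.dot_product_attention(query, key, value, bias=bias, mask=...)  # ValueError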
keras/src/backend/torch/numpy.py CHANGED
@@ -1272,6 +1272,24 @@ def moveaxis(x, source, destination):
      return torch.moveaxis(x, source=source, destination=destination)


+ def nanmax(x, axis=None, keepdims=False):
+     x = convert_to_tensor(x)
+     if not torch.is_floating_point(x):
+         return torch.amax(x, dim=axis, keepdim=keepdims)
+
+     if axis == () or axis == []:
+         return x
+
+     x_clean = torch.where(torch.isnan(x), float("-inf"), x)
+     out = torch.amax(x_clean, dim=axis, keepdim=keepdims)
+
+     return torch.where(
+         torch.isnan(x).all(dim=axis, keepdim=keepdims),
+         torch.tensor(float("nan"), dtype=x.dtype, device=get_device()),
+         out,
+     )
+
+
  def nanmin(x, axis=None, keepdims=False):
      x = convert_to_tensor(x)
      if not torch.is_floating_point(x):
keras/src/layers/rnn/time_distributed.py CHANGED
@@ -71,37 +71,43 @@ class TimeDistributed(Wrapper):
          super().build(child_input_shape)

      def call(self, inputs, training=None, mask=None):
-         input_shape = ops.shape(inputs)
-         mask_shape = None if mask is None else ops.shape(mask)
-         batch_size = input_shape[0]
-         timesteps = input_shape[1]
-
-         # For TF backend with graph mode and `partial_batch_size`, skip
-         # evaluation of `batch_size` as it can be a `strided_slice` and
-         # not a constant.
-         if backend.backend() == "tensorflow":
-             from keras.src.utils.module_utils import tensorflow as tf
-
-             if (
-                 not tf.executing_eagerly
-                 and mask_shape is not None
-                 and mask_shape[1:2] != (timesteps,)
-             ):
+         # Validate mask shape using static shape info when available
+         if mask is not None:
+             mask_shape = mask.shape
+             input_shape = inputs.shape
+
+             # Check if mask has at least 2 dimensions (batch and timesteps)
+             if len(mask_shape) < 2:
                  raise ValueError(
-                     "`TimeDistributed` Layer should be passed a `mask` of "
-                     f"shape ({batch_size}, {timesteps}, ...), "
-                     f"received: mask.shape={mask_shape}"
+                     "The `mask` passed to the `TimeDistributed` layer must be "
+                     "at least 2D (e.g., `(batch_size, timesteps)`), but it has "
+                     f"{len(mask_shape)} dimension(s) with shape {mask_shape}."
                  )
-             elif mask_shape is not None and mask_shape[:2] != (
-                 batch_size,
-                 timesteps,
-             ):
-                 raise ValueError(
-                     "`TimeDistributed` Layer should be passed a `mask` of "
-                     f"shape ({batch_size}, {timesteps}, ...), "
-                     f"received: mask.shape={mask_shape}"
+
+             # Check batch size and timesteps dimensions match
+             batch_mismatch = (
+                 input_shape[0] is not None
+                 and mask_shape[0] is not None
+                 and input_shape[0] != mask_shape[0]
+             )
+             time_mismatch = (
+                 input_shape[1] is not None
+                 and mask_shape[1] is not None
+                 and input_shape[1] != mask_shape[1]
              )

+             if batch_mismatch or time_mismatch:
+                 raise ValueError(
+                     "The `mask` passed to the `TimeDistributed` layer has a "
+                     f"shape {mask_shape} that is incompatible with the input "
+                     f"shape {input_shape}. The first two dimensions of the "
+                     "mask (batch size and timesteps) must match the input's "
+                     "first two dimensions. Expected mask shape prefix: "
+                     f"({input_shape[0]}, {input_shape[1]})."
+                 )
+
+         input_shape = ops.shape(inputs)
+
          def time_distributed_transpose(data):
              """Swaps the timestep and batch dimensions of a tensor."""
              axes = [1, 0, *range(2, len(data.shape))]
@@ -129,5 +135,7 @@ class TimeDistributed(Wrapper):

          # Implementation #2: use backend.vectorized_map.

-         outputs = backend.vectorized_map(step_function, ops.arange(timesteps))
+         outputs = backend.vectorized_map(
+             step_function, ops.arange(input_shape[0])
+         )
          return time_distributed_transpose(outputs)
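An illustrative sketch of the mask contract the rewritten validation enforces: the first two mask dimensions must match the input's batch and timestep dimensions. The wrapped layer and shapes below are arbitrary assumptions:

import numpy as np
from keras import layers

batch, timesteps, features = 4, 10, 8
inputs = np.random.rand(batch, timesteps, features).astype("float32")
layer = layers.TimeDistributed(layers.Dense(3))

# Valid: the mask's first two dims match the input's (batch, timesteps).
mask = np.ones((batch, timesteps), dtype=bool)
out = layer(inputs, mask=mask)
print(out.shape)  # (4, 10, 3)

# Invalid: timesteps disagree (10 vs 7), so the new check raises a ValueError.
# bad_mask = np.ones((batch, 7), dtype=bool)
# layer(inputs, mask=bad_mask)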
keras/src/ops/core.py CHANGED
@@ -277,7 +277,7 @@ def associative_scan(f, elems, reverse=False, axis=0):
      [0, 1, 3, 6, 10]

      >>> sum_fn = lambda x, y: [x[0] + y[0], x[1] + y[1], x[2] + y[2]]
-     >>> xs = [keras.ops.array([[1, 2]]) for _ in range(3)]
+     >>> xs = [keras.ops.array([1, 2]) for _ in range(3)]
      >>> ys = keras.ops.associative_scan(sum_fn, xs, axis=0)
      >>> ys
      [[1, 3], [1, 3], [1, 3]]
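The docstring fix makes the example self-consistent: each leaf now has shape (2,), so the inclusive scan over axis 0 turns [1, 2] into [1, 3] per leaf, matching the documented output. A quick check of the corrected snippet (assumes any installed backend):

from keras import ops

sum_fn = lambda x, y: [x[0] + y[0], x[1] + y[1], x[2] + y[2]]
xs = [ops.array([1, 2]) for _ in range(3)]

ys = ops.associative_scan(sum_fn, xs, axis=0)
print([ops.convert_to_numpy(y).tolist() for y in ys])  # [[1, 3], [1, 3], [1, 3]]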
keras/src/ops/numpy.py CHANGED
@@ -5064,6 +5064,63 @@ def moveaxis(x, source, destination):
      return backend.numpy.moveaxis(x, source=source, destination=destination)


+ class Nanmax(Operation):
+     def __init__(self, axis=None, keepdims=False, *, name=None):
+         super().__init__(name=name)
+         self.axis = axis
+         self.keepdims = keepdims
+
+     def call(self, x):
+         return backend.numpy.nanmax(x, axis=self.axis, keepdims=self.keepdims)
+
+     def compute_output_spec(self, x):
+         dtype = dtypes.result_type(getattr(x, "dtype", backend.floatx()))
+
+         if backend.backend() == "torch" and dtype == "uint32":
+             dtype = "int32"
+
+         return KerasTensor(
+             reduce_shape(x.shape, axis=self.axis, keepdims=self.keepdims),
+             dtype=dtype,
+         )
+
+
+ @keras_export(["keras.ops.nanmax", "keras.ops.numpy.nanmax"])
+ def nanmax(x, axis=None, keepdims=False):
+     """Maximum of a tensor over the given axes, ignoring NaNs.
+
+     Args:
+         x: Input tensor.
+         axis: Axis or axes along which the maximum is computed.
+             The default is to compute the maximum of the flattened tensor.
+         keepdims: If this is set to `True`, the axes which are reduced are left
+             in the result as dimensions with size one. Defaults
+             to `False`.
+
+     Returns:
+         Output tensor containing the maximum, with NaN values ignored. If all
+         values along a reduced axis are NaN, the result is NaN.
+
+     Examples:
+     >>> import numpy as np
+     >>> from keras import ops
+     >>> x = np.array([[1.0, np.nan, 3.0],
+     ...               [np.nan, 2.0, 1.0]])
+     >>> ops.nanmax(x)
+     3.0
+
+     >>> ops.nanmax(x, axis=1)
+     array([3., 2.])
+
+     >>> ops.nanmax(x, axis=1, keepdims=True)
+     array([[3.],
+            [2.]])
+     """
+     if any_symbolic_tensors((x,)):
+         return Nanmax(axis=axis, keepdims=keepdims).symbolic_call(x)
+     return backend.numpy.nanmax(x, axis=axis, keepdims=keepdims)
+
+
  class Nanmin(Operation):
      def __init__(self, axis=None, keepdims=False, *, name=None):
          super().__init__(name=name)
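A short usage sketch of the newly exported op, covering both the eager path and the symbolic path through Nanmax.compute_output_spec; it only works on builds that include this change:

import numpy as np
from keras import KerasTensor, ops

x = np.array([[1.0, np.nan, 3.0],
              [np.nan, 2.0, 1.0]])

print(ops.nanmax(x))                         # 3.0
print(ops.nanmax(x, axis=1, keepdims=True))  # [[3.], [2.]]

# Symbolic call: only shape/dtype inference runs, no computation.
sym = KerasTensor(shape=(None, 3), dtype="float32")
print(ops.nanmax(sym, axis=1).shape)  # (None,)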
keras/src/version.py CHANGED
@@ -1,7 +1,7 @@
  from keras.src.api_export import keras_export

  # Unique source of truth for the version number.
- __version__ = "3.14.0.dev2026012304"
+ __version__ = "3.14.0.dev2026012404"


  @keras_export("keras.version")
keras_nightly-3.14.0.dev2026012404.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: keras-nightly
- Version: 3.14.0.dev2026012304
+ Version: 3.14.0.dev2026012404
  Summary: Multi-backend Keras
  Author-email: Keras team <keras-users@googlegroups.com>
  License: Apache License 2.0
keras_nightly-3.14.0.dev2026012404.dist-info/RECORD CHANGED
@@ -45,11 +45,11 @@ keras/_tf_keras/keras/losses/__init__.py,sha256=xBc_KOtSLwp3h3CKQ0EnCuIy-Bsak2SP
  keras/_tf_keras/keras/metrics/__init__.py,sha256=_wF31PTvua5ahF9JEW4Hx1UVNjVCLqVI8J5JNrZCBf8,6546
  keras/_tf_keras/keras/mixed_precision/__init__.py,sha256=AM51CzHqzcY75tqdpQiuVcTRUEpUzBqeb-EfLeSDSV8,727
  keras/_tf_keras/keras/models/__init__.py,sha256=83pyA0pzytqin8JLV6FEbPreCb-V64ToebxFGrHsVdQ,501
- keras/_tf_keras/keras/ops/__init__.py,sha256=Exo66cXhtlICdHcwCb9W1aY_kyKGmu_E0gx2g8arqAA,15667
+ keras/_tf_keras/keras/ops/__init__.py,sha256=j1hmnwcCXYOBqSaKHODKVyBwwS6N38KfC-W8_t9_GmU,15716
  keras/_tf_keras/keras/ops/image/__init__.py,sha256=oM_PLh5Jk9OGfi1bbJcfWkjoq0Ye5JQG9a7v_KzDfoc,1034
  keras/_tf_keras/keras/ops/linalg/__init__.py,sha256=0ab6icK3yuIm4khSfAksGRFLEAJhaOu6gGgarau4iEQ,822
  keras/_tf_keras/keras/ops/nn/__init__.py,sha256=2eD8IlkfBrsmJjHpzsxMM3_058oGeZVgohdBd27iDnI,2992
- keras/_tf_keras/keras/ops/numpy/__init__.py,sha256=Et9qtWaBOwKAOyJJcBnUjmu40MKJz6mRHtUXHdWdH-Q,9729
+ keras/_tf_keras/keras/ops/numpy/__init__.py,sha256=EEX_wcRRiw4E4Tq8Bm-n5AVi-OotP8FMdLRni12oEZQ,9778
  keras/_tf_keras/keras/optimizers/__init__.py,sha256=1fx0vEB-oGu-9dumxoIvX4qVHdgJvf74OLyYoBkE2y0,1267
  keras/_tf_keras/keras/optimizers/legacy/__init__.py,sha256=uIMQESCV80Q0FY-9ikQUjXYPyZqmTfAM3dfohQ5DzYs,516
  keras/_tf_keras/keras/optimizers/schedules/__init__.py,sha256=pQF3rQiAPuUSTUdflTr-fpL77oyGIv9xzGdjae3M3kw,1120
@@ -111,11 +111,11 @@ keras/losses/__init__.py,sha256=VIXBHQFNdLUPZ7JuwtIKj_4E-xf2yvNyrmdklvjr_xM,3667
  keras/metrics/__init__.py,sha256=qeEwtqpSCAaCr8BMUv1eVaqJl2Zb83OB5K0BG3JB0nI,6245
  keras/mixed_precision/__init__.py,sha256=AM51CzHqzcY75tqdpQiuVcTRUEpUzBqeb-EfLeSDSV8,727
  keras/models/__init__.py,sha256=83pyA0pzytqin8JLV6FEbPreCb-V64ToebxFGrHsVdQ,501
- keras/ops/__init__.py,sha256=Exo66cXhtlICdHcwCb9W1aY_kyKGmu_E0gx2g8arqAA,15667
+ keras/ops/__init__.py,sha256=j1hmnwcCXYOBqSaKHODKVyBwwS6N38KfC-W8_t9_GmU,15716
  keras/ops/image/__init__.py,sha256=oM_PLh5Jk9OGfi1bbJcfWkjoq0Ye5JQG9a7v_KzDfoc,1034
  keras/ops/linalg/__init__.py,sha256=0ab6icK3yuIm4khSfAksGRFLEAJhaOu6gGgarau4iEQ,822
  keras/ops/nn/__init__.py,sha256=2eD8IlkfBrsmJjHpzsxMM3_058oGeZVgohdBd27iDnI,2992
- keras/ops/numpy/__init__.py,sha256=Et9qtWaBOwKAOyJJcBnUjmu40MKJz6mRHtUXHdWdH-Q,9729
+ keras/ops/numpy/__init__.py,sha256=EEX_wcRRiw4E4Tq8Bm-n5AVi-OotP8FMdLRni12oEZQ,9778
  keras/optimizers/__init__.py,sha256=1fx0vEB-oGu-9dumxoIvX4qVHdgJvf74OLyYoBkE2y0,1267
  keras/optimizers/legacy/__init__.py,sha256=uIMQESCV80Q0FY-9ikQUjXYPyZqmTfAM3dfohQ5DzYs,516
  keras/optimizers/schedules/__init__.py,sha256=pQF3rQiAPuUSTUdflTr-fpL77oyGIv9xzGdjae3M3kw,1120
@@ -128,7 +128,7 @@ keras/regularizers/__init__.py,sha256=542Shphw7W8h4Dyf2rmqMKUECVZ8IVBvN9g1LWhz-b
  keras/saving/__init__.py,sha256=KvL2GZxjvgFgEhvEnkvqjIR9JSNHKz-NWZacXajsjLI,1298
  keras/src/__init__.py,sha256=Gi4S7EiCMkE03PbdGNpFdaUYySWDs_FcAJ8Taz9Y1BE,684
  keras/src/api_export.py,sha256=gXOkBOnmscV013WAc75lc4Up01-Kkg9EylIAT_QWctg,1173
- keras/src/version.py,sha256=zrwbXx271qaOFPQlnpEWKXzwfgQbyWOQOcBlTEB9byM,204
+ keras/src/version.py,sha256=qNjRmpUKTYhBSAghCCqq1ftuxf8a-hoOqn1ML8GDtTg,204
  keras/src/activations/__init__.py,sha256=0nL3IFDB9unlrMz8ninKOWo-uCHasTUpTo1tXZb2u44,4433
  keras/src/activations/activations.py,sha256=mogPggtp4CGldI3VOPNmesRxp6EbiR1_i4KLGaVwzL8,17614
  keras/src/applications/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -171,7 +171,7 @@ keras/src/backend/jax/layer.py,sha256=o6CicT06udwamTRQIjNSDLZLyYHFzBXNbxewXgWe0i
  keras/src/backend/jax/linalg.py,sha256=LDaLZYz49ChE2kJR3YpaM9xuwusvd3krV7nNAAazTWA,2642
  keras/src/backend/jax/math.py,sha256=1IEDpdoF8e5ltu3D4wbDQuihzvJHhMXz8W9Z_E-eJqU,9391
  keras/src/backend/jax/nn.py,sha256=mrRawNvf9EWe8rdTwK_Auz6xdLkVG6hH0nIAP7hyUDE,60271
- keras/src/backend/jax/numpy.py,sha256=e-EU_q5qbWL4tQAmuXgLTzBtAReMbkJ_lHtzmP2J634,38997
+ keras/src/backend/jax/numpy.py,sha256=XMBWC4MM3eOCm2tePDT1J-Bj8h6SH0VWvArGGe0-2X8,39125
  keras/src/backend/jax/optimizer.py,sha256=5DeXQHcYmUI6F9i1m1VHn3sBt4LEStOeBXnKdESevLM,4134
  keras/src/backend/jax/random.py,sha256=Uk2huGIk_dlzMrx5eDVrrr2TeCEMitn2vr4yzA0NXjs,3594
  keras/src/backend/jax/rnn.py,sha256=Ycq0qfLY4M4jhltvztpLQyywjEM17T7CZQFh4hhHOUE,7767
@@ -186,7 +186,7 @@ keras/src/backend/numpy/layer.py,sha256=dTk7W7ql7vRgll7JbOXK5PlIhQw5VHdpSjKciHd8
  keras/src/backend/numpy/linalg.py,sha256=uzLTxEyuX_gDcnoA5Q59GdTg33py0WooKK5T6T9Td6c,2543
  keras/src/backend/numpy/math.py,sha256=HdkEA5ro7dtQBTP78GFIgqTFLgNQ49PXHhqI1vLRGfo,10169
  keras/src/backend/numpy/nn.py,sha256=P9JAnTlwSTI7bVv8WIv1pDQJHpjML_WJ0RsJWy-LJMc,46200
- keras/src/backend/numpy/numpy.py,sha256=o71x6rkJgj1Mq2e0iJSIDq9v-xs_PFgskBioLc-_5cs,38168
+ keras/src/backend/numpy/numpy.py,sha256=UlqAexse7-DskEkQWw5mW2mXI-vBo6NO3jAoPwMzMM4,38266
  keras/src/backend/numpy/random.py,sha256=wx2nE75q7L2cBMjtQlQx8yKMj4Ie3puFMDQsbrZO8SA,3961
  keras/src/backend/numpy/rnn.py,sha256=thOsMung1qR3lQsR4_D6hqKMFollQgrB0KwsJLk4BMY,7867
  keras/src/backend/numpy/trainer.py,sha256=MzWr8_LLHa1P6fxdUWirGw_lQwHGF_vkZ7RUGLUzjUs,11126
@@ -198,7 +198,7 @@ keras/src/backend/openvino/layer.py,sha256=5RdvaH1yOyPAphjKiuQAK1H_yZFYKE1Hp7c5b
  keras/src/backend/openvino/linalg.py,sha256=L6a4MFGND2wWzPVCh44cwuOgkcC4wJTo8Xy3HwW04lg,1614
  keras/src/backend/openvino/math.py,sha256=qw9kX2sJ2qr0dBJF12Ey0E2GcwixPUqoev6UcNra4NI,3944
  keras/src/backend/openvino/nn.py,sha256=zULPxdwVO7JDZUUtsuoEEPCLQ09ew8z8T6G_i_NEqrM,23741
- keras/src/backend/openvino/numpy.py,sha256=D1ALQlwjLfIUnWslmUbDtvmqCUF4Hy_zYoSPV_O1PIc,109841
+ keras/src/backend/openvino/numpy.py,sha256=qmw4X3u0oJZptzqNHM33FPjTd_Ku6xY9dxxYFqW8c0g,109966
  keras/src/backend/openvino/random.py,sha256=4hRUtIP6qJxO3Qy9uH1x6jSuJna3nWPdUf4x2QU8-ew,5575
  keras/src/backend/openvino/rnn.py,sha256=ErmuZLPSgG9qU-NfYPPvBZ6Ysy8k-fA4g19Vhqq7OVQ,866
  keras/src/backend/openvino/trainer.py,sha256=bMmtSALqydqdS6ke-5sYW5fgxZDshDH810p_C0xCRTg,9087
@@ -211,7 +211,7 @@ keras/src/backend/tensorflow/layer.py,sha256=69d40LwL4HhKRsCjj1VRpjfrQXXF8VV3vh0
  keras/src/backend/tensorflow/linalg.py,sha256=_lZVfdY1tFvrN7xwbt3INGoTR0yC5v-kI1Q0XppVibY,8773
  keras/src/backend/tensorflow/math.py,sha256=zTu_7Ff6B2Ro862z_xH0OCmIWbV74DjsO5UnfjYuOUQ,12370
  keras/src/backend/tensorflow/nn.py,sha256=6vtZHzUED6_blUPE1Tnc3GAxPpJ2ebxoaiMn80tTL9k,51328
- keras/src/backend/tensorflow/numpy.py,sha256=j_EuTLDE8mgJSZuCt7yWHZUbvwQz3T-ZksSzCP3cl4s,105695
+ keras/src/backend/tensorflow/numpy.py,sha256=NHaXiWpFYVIUrZDTz3XaUKcN1AYsI9PGnlJmksOIOY8,106315
  keras/src/backend/tensorflow/optimizer.py,sha256=kFlyEOnGjEYdLpd8mpwhUeku78__xBfZbbrDWpJrq60,9307
  keras/src/backend/tensorflow/random.py,sha256=iO8V_soaDXZm9ewyAVbjudhsMj08C348c9Bz64nxXC4,6475
  keras/src/backend/tensorflow/rnn.py,sha256=JbOSpt48cm612c7YwiTYOQCQsNXyI_6QeRhtUn8qEvM,34829
@@ -220,14 +220,14 @@ keras/src/backend/tensorflow/tensorboard.py,sha256=e7pXicuMfQjuCmq1wOmixWhWt2Ebj
  keras/src/backend/tensorflow/trackable.py,sha256=QZn0JvpBJ7Kx4e6zM2IVIWz9ADcWDB-dHN6vjoQBa9Q,1993
  keras/src/backend/tensorflow/trainer.py,sha256=otJ4CciSmE2-QH17bEyjaL-oQxiCD5eWgbkEglxjt4w,37191
  keras/src/backend/torch/__init__.py,sha256=0SiJ91WMaE_tO5q1zUsLEnU6hmPTpGKPIOkmIWaHlhk,2131
- keras/src/backend/torch/core.py,sha256=zKEwMjwSyPojYDXNBJUd2AGdjZ0KZNDf_ArYsXzJNDU,24401
+ keras/src/backend/torch/core.py,sha256=cMo5dCDxWicWgezOargrq1qx-tlS1GJzzYXm--PS1l0,24506
  keras/src/backend/torch/export.py,sha256=yYc5-4JxSiaCkbFWpfCIdcm4dDBv_9uG_uH6JR6oGx0,4909
  keras/src/backend/torch/image.py,sha256=eer8LZwDMz3k2Dh5gxeTQfwrxPemM_H6eHMIP3AwRss,39149
  keras/src/backend/torch/layer.py,sha256=htECdpv9ioHWM8_zqQkEdxgDsgLu8XJi5yXgnLl-JFw,2084
  keras/src/backend/torch/linalg.py,sha256=wgPCfnscp5HOBmX9_-m-57lzxs1ttLNzmHqj2VYYq7k,2108
  keras/src/backend/torch/math.py,sha256=g-ElDii2Y_o1-t6BAu2nbS7JH-aPqVS5Fqds8aYzIlg,14324
- keras/src/backend/torch/nn.py,sha256=zmEzXEuwD7fVRDm145zsxzUDmqNmRgZS4LmeIx4Nbus,37498
- keras/src/backend/torch/numpy.py,sha256=JeMDIOubCyLqMqid1xg4CQ0Nm4gf5F7WLNggaiTviuE,58582
+ keras/src/backend/torch/nn.py,sha256=80MdDzkN7wV3MJbNsGh9B8IkdBoXC36wQcV8_o13y-8,37688
+ keras/src/backend/torch/numpy.py,sha256=19zGj9Yr98Aejtv_GLiqeDQjAd_MrBedpWTqGed_gYw,59093
  keras/src/backend/torch/random.py,sha256=YhLfC7qkGpzlU_i6gGPVormo3BMSo7OUA3TC3GCehrA,8292
  keras/src/backend/torch/rnn.py,sha256=MJIVbHKsUA2dZm4Gu2NvRxlrFCWeWSxSZRmFxSsC3Zg,26041
  keras/src/backend/torch/trainer.py,sha256=dcikz1c5O0FHNzRKSi6WhIHsHfLV2HDlrXPElSd1cgE,17985
@@ -459,7 +459,7 @@ keras/src/layers/rnn/lstm.py,sha256=9joVha35UQrnwQq6-khk7NbfiC9YwDvuS4kqb6S6DZ4,
  keras/src/layers/rnn/rnn.py,sha256=K4zhHjyO7vd1NvOMP3i3Bk4JWP-Ljb1C0693qmRKXA8,20171
  keras/src/layers/rnn/simple_rnn.py,sha256=35O9Yh_wQOzc5ZCVk5Zwrb15tlFFC6fMwmNxKS6v42o,17527
  keras/src/layers/rnn/stacked_rnn_cells.py,sha256=PYM7Qqp6qKFObRhu9v5NZ3iJnn-rgaCh0Hv4WebrkXY,4917
- keras/src/layers/rnn/time_distributed.py,sha256=lfmUcwmCDQXjB1hKZgQJnDU3rQ0bRwWc72SrUe7VYrg,5515
+ keras/src/layers/rnn/time_distributed.py,sha256=VlSA7geAAPDB8Q5mvLD6afkcb_f7a4KbCr2ybQ0CnCA,5916
  keras/src/legacy/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  keras/src/legacy/backend.py,sha256=_RMWtH3ePnZotDzOswVlgMT45rbV_bV8snJYzr-Y2AE,70002
  keras/src/legacy/layers.py,sha256=oOaFtRtroSZpKL0z4tDWOpUbsrJhmuef6twESrSOmx8,8396
@@ -496,7 +496,7 @@ keras/src/models/model.py,sha256=szseM7sjfNkdOGytF25nijhjERBu_66WPSYaJ719VBY,424
  keras/src/models/sequential.py,sha256=CC9Q1BNB9m7TkgMHRyjOzhQvneng576wJpmdgHrACKY,14352
  keras/src/models/variable_mapping.py,sha256=FVtcgjBRqOxtvkzOE6kjG9SpcB9keDg2gS5LOTlXvG0,2181
  keras/src/ops/__init__.py,sha256=aORlvnrqY_eQl0EFLWdpHsXHnQ6JLSw1qhwJMr-VXJ0,644
- keras/src/ops/core.py,sha256=t06-MvptYb6ZVwmNj083JyUtzU4M6UTVXOT2vVHtKyU,42781
+ keras/src/ops/core.py,sha256=1L74Jox7wY6R_DFBzVVS3VjLlIKbE0sxyK5x-pjzx8Q,42779
  keras/src/ops/einops.py,sha256=-pxW0_AzDQNsR7t2TJrzvYXBJpmLYA3fJoO0U_U96PY,6268
  keras/src/ops/function.py,sha256=QV9n1-xeTPDK_FJ3sjlHDWVH2sqDj96R6YQnJueMOlA,17821
  keras/src/ops/image.py,sha256=Drfouun3Gaod0LNgG5nxrKkgIJ4STjWQWvzbTIjKOxs,67251
@@ -504,7 +504,7 @@ keras/src/ops/linalg.py,sha256=3V8S_cgNxZZCIFcFj-FBHTdRqWNbimDtumMvfoc0f30,26736
  keras/src/ops/math.py,sha256=4qYMJ5qAPmeSyeF63YWoGbUkQt6f4_VX0enOChU4mXU,37233
  keras/src/ops/nn.py,sha256=04gjHB2BWusy4tWm59EO5Ns1paJC5umDNGwNCKzaJWQ,104658
  keras/src/ops/node.py,sha256=aJgn9D-GkteE--Bbt2cZ9JjVxb2W2uS1OWEKoeLsl3Y,5583
- keras/src/ops/numpy.py,sha256=6-nCfjwd4y0oWLoL72ZTmyDu-kNLWdNlL4KDr6TsqC8,260893
+ keras/src/ops/numpy.py,sha256=1rIBEmWGxdHUSoSjy-JRaC-GAHPCa-MZ50bfoIdUvRE,262690
  keras/src/ops/operation.py,sha256=A7sh9Hi6kZb7wkeMmhrDQIq770ofANXuP-Qg-kwCM3o,15485
  keras/src/ops/operation_utils.py,sha256=C6eThl-haKzlDH0fC1rn5-P1P-pCfIfXs-fy-ADR534,14523
  keras/src/ops/symbolic_arguments.py,sha256=MKwXxZYkyouD9BPmQ1uUNxILdcwPvTayAqXaUV3P3o4,1628
@@ -618,7 +618,7 @@ keras/utils/bounding_boxes/__init__.py,sha256=jtvQll4u8ZY0Z96HwNhP1nxWEG9FM3gI-6
  keras/utils/legacy/__init__.py,sha256=oSYZz6uS8UxSElRaaJYWJEoweJ4GAasZjnn7fNaOlog,342
  keras/visualization/__init__.py,sha256=UKWmiy6sps4SWlmQi9WX8_Z53cPpLlphz2zIeHdwJpQ,722
  keras/wrappers/__init__.py,sha256=QkS-O5K8qGS7C3sytF8MpmO6PasATpNVGF8qtb7Ojsw,407
- keras_nightly-3.14.0.dev2026012304.dist-info/METADATA,sha256=WLjmq7_YlxrYTWWRdBTiOboC4zCj0j5pKexkiZZ6C6Y,6339
- keras_nightly-3.14.0.dev2026012304.dist-info/WHEEL,sha256=qELbo2s1Yzl39ZmrAibXA2jjPLUYfnVhUNTlyF1rq0Y,92
- keras_nightly-3.14.0.dev2026012304.dist-info/top_level.txt,sha256=ptcw_-QuGZ4ZDjMdwi_Z0clZm8QAqFdvzzFnDEOTs9o,6
- keras_nightly-3.14.0.dev2026012304.dist-info/RECORD,,
+ keras_nightly-3.14.0.dev2026012404.dist-info/METADATA,sha256=pKZtGqykls2YgEuMm-dRviiJEGZMo_vUAFyym60nz_0,6339
+ keras_nightly-3.14.0.dev2026012404.dist-info/WHEEL,sha256=qELbo2s1Yzl39ZmrAibXA2jjPLUYfnVhUNTlyF1rq0Y,92
+ keras_nightly-3.14.0.dev2026012404.dist-info/top_level.txt,sha256=ptcw_-QuGZ4ZDjMdwi_Z0clZm8QAqFdvzzFnDEOTs9o,6
+ keras_nightly-3.14.0.dev2026012404.dist-info/RECORD,,