keras-nightly 3.14.0.dev2026012604__py3-none-any.whl → 3.14.0.dev2026012804__py3-none-any.whl

This diff shows the content changes between two publicly released versions of the package, as they appear in the supported public registry. It is provided for informational purposes only.
keras/_tf_keras/keras/ops/__init__.py CHANGED
@@ -246,6 +246,7 @@ from keras.src.ops.numpy import moveaxis as moveaxis
  from keras.src.ops.numpy import multiply as multiply
  from keras.src.ops.numpy import nan_to_num as nan_to_num
  from keras.src.ops.numpy import nanmax as nanmax
+ from keras.src.ops.numpy import nanmean as nanmean
  from keras.src.ops.numpy import nanmin as nanmin
  from keras.src.ops.numpy import nansum as nansum
  from keras.src.ops.numpy import ndim as ndim
keras/_tf_keras/keras/ops/numpy/__init__.py CHANGED
@@ -130,6 +130,7 @@ from keras.src.ops.numpy import moveaxis as moveaxis
  from keras.src.ops.numpy import multiply as multiply
  from keras.src.ops.numpy import nan_to_num as nan_to_num
  from keras.src.ops.numpy import nanmax as nanmax
+ from keras.src.ops.numpy import nanmean as nanmean
  from keras.src.ops.numpy import nanmin as nanmin
  from keras.src.ops.numpy import nansum as nansum
  from keras.src.ops.numpy import ndim as ndim
keras/ops/__init__.py CHANGED
@@ -246,6 +246,7 @@ from keras.src.ops.numpy import moveaxis as moveaxis
  from keras.src.ops.numpy import multiply as multiply
  from keras.src.ops.numpy import nan_to_num as nan_to_num
  from keras.src.ops.numpy import nanmax as nanmax
+ from keras.src.ops.numpy import nanmean as nanmean
  from keras.src.ops.numpy import nanmin as nanmin
  from keras.src.ops.numpy import nansum as nansum
  from keras.src.ops.numpy import ndim as ndim
keras/ops/numpy/__init__.py CHANGED
@@ -130,6 +130,7 @@ from keras.src.ops.numpy import moveaxis as moveaxis
  from keras.src.ops.numpy import multiply as multiply
  from keras.src.ops.numpy import nan_to_num as nan_to_num
  from keras.src.ops.numpy import nanmax as nanmax
+ from keras.src.ops.numpy import nanmean as nanmean
  from keras.src.ops.numpy import nanmin as nanmin
  from keras.src.ops.numpy import nansum as nansum
  from keras.src.ops.numpy import ndim as ndim
keras/src/backend/jax/numpy.py CHANGED
@@ -1018,6 +1018,11 @@ def nanmax(x, axis=None, keepdims=False):
      return jnp.nanmax(x, axis=axis, keepdims=keepdims)


+ def nanmean(x, axis=None, keepdims=False):
+     x = convert_to_tensor(x)
+     return jnp.nanmean(x, axis=axis, keepdims=keepdims)
+
+
  def nanmin(x, axis=None, keepdims=False):
      x = convert_to_tensor(x)
      return jnp.nanmin(x, axis=axis, keepdims=keepdims)
keras/src/backend/numpy/numpy.py CHANGED
@@ -964,6 +964,11 @@ def nanmax(x, axis=None, keepdims=False):
      return np.nanmax(x, axis=axis, keepdims=keepdims)


+ def nanmean(x, axis=None, keepdims=False):
+     dtype = dtypes.result_type(standardize_dtype(x.dtype), float)
+     return np.nanmean(x, axis=axis, keepdims=keepdims).astype(dtype)
+
+
  def nanmin(x, axis=None, keepdims=False):
      return np.nanmin(x, axis=axis, keepdims=keepdims)

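Note: np.nanmean always computes in floating point, so the backend re-casts the result to whatever dtype Keras's promotion rules produce. A minimal sketch (not part of the diff) of that behavior:

    import numpy as np

    x = np.array([1, 2, 3], dtype="int32")
    print(np.nanmean(x).dtype)                    # float64 (NumPy's default)
    # The backend casts to the promoted Keras dtype, e.g. float32:
    print(np.nanmean(x).astype("float32").dtype)  # float32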
keras/src/backend/openvino/numpy.py CHANGED
@@ -2111,6 +2111,12 @@ def nanmax(x, axis=None, keepdims=False):
      raise NotImplementedError("`nanmax` is not supported with openvino backend")


+ def nanmean(x, axis=None, keepdims=False):
+     raise NotImplementedError(
+         "`nanmean` is not supported with openvino backend"
+     )
+
+
  def nanmin(x, axis=None, keepdims=False):
      raise NotImplementedError("`nanmin` is not supported with openvino backend")

@@ -2270,7 +2276,16 @@ def prod(x, axis=None, keepdims=False, dtype=None):


  def ptp(x, axis=None, keepdims=False):
-     raise NotImplementedError("`ptp` is not supported with openvino backend")
+     if axis == ():
+         return zeros_like(x)
+     x = get_ov_output(x)
+
+     x_resolved, resolved_axis = _resolve_axis(x, axis)
+
+     max_val = ov_opset.reduce_max(x_resolved, resolved_axis, keepdims)
+     min_val = ov_opset.reduce_min(x_resolved, resolved_axis, keepdims)
+
+     return OpenVINOKerasTensor(ov_opset.subtract(max_val, min_val).output(0))


  def quantile(x, q, axis=None, method="linear", keepdims=False):
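Note: `ptp` ("peak to peak") is the range max - min over the reduced axes, which the OpenVINO graph above builds from reduce_max and reduce_min. A NumPy sketch (not part of the diff) of the semantics:

    import numpy as np

    x = np.array([[4.0, 9.0, 2.0], [10.0, 6.0, 15.0]])
    print(np.ptp(x, axis=1))  # [7. 9.], i.e. per-row max - min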
@@ -2682,7 +2697,23 @@ def tensordot(x1, x2, axes=2):


  def round(x, decimals=0):
-     raise NotImplementedError("`round` is not supported with openvino backend")
+     x = get_ov_output(x)
+     x_type = x.get_element_type()
+     if x_type.is_integral() or x_type == Type.boolean:
+         x = ov_opset.convert(x, OPENVINO_DTYPES[config.floatx()])
+
+     if decimals == 0:
+         result = ov_opset.round(x, "half_to_even")
+     else:
+         factor = ov_opset.constant(10.0**decimals, x.get_element_type())
+         scaled = ov_opset.multiply(x, factor)
+         rounded = ov_opset.round(scaled, "half_to_even")
+         result = ov_opset.divide(rounded, factor)
+
+     if x_type.is_integral():
+         result = ov_opset.convert(result, x_type)
+
+     return OpenVINOKerasTensor(result.output(0))


  def tile(x, repeats):
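Note: the implementation rounds half to even (banker's rounding) and handles `decimals != 0` by scaling, rounding, and unscaling. A NumPy sketch (not part of the diff) of both behaviors:

    import numpy as np

    # Halfway cases go to the nearest even value, matching "half_to_even".
    print(np.round([0.5, 1.5, 2.5]))           # [0. 2. 2.]

    # decimals != 0: scale by 10**decimals, round, scale back.
    factor = 10.0 ** 2
    print(np.round(1.2345 * factor) / factor)  # 1.23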
@@ -2803,7 +2834,17 @@ def vdot(x1, x2):


  def vstack(xs):
-     raise NotImplementedError("`vstack` is not supported with openvino backend")
+     if not isinstance(xs, (list, tuple)):
+         xs = (xs,)
+     elems = [convert_to_tensor(elem) for elem in xs]
+     element_type = elems[0].output.get_element_type()
+     elems = [get_ov_output(elem, element_type) for elem in elems]
+     axis = 0
+     for i in range(1, len(elems)):
+         elems[0], elems[i] = _align_operand_types(
+             elems[0], elems[i], "vstack()"
+         )
+     return OpenVINOKerasTensor(ov_opset.concat(elems, axis).output(0))


  def vectorize(pyfunc, *, excluded=None, signature=None):
@@ -2859,9 +2900,20 @@ def divide(x1, x2):


  def divide_no_nan(x1, x2):
-     raise NotImplementedError(
-         "`divide_no_nan` is not supported with openvino backend"
-     )
+     element_type = None
+     if isinstance(x1, OpenVINOKerasTensor):
+         element_type = x1.output.get_element_type()
+     if isinstance(x2, OpenVINOKerasTensor):
+         element_type = x2.output.get_element_type()
+     x1 = get_ov_output(x1, element_type)
+     x2 = get_ov_output(x2, element_type)
+     x1, x2 = _align_operand_types(x1, x2, "divide_no_nan()")
+
+     zero = ov_opset.constant(0, x2.get_element_type())
+     div = ov_opset.divide(x1, x2)
+     is_zero = ov_opset.equal(x2, zero)
+     result = ov_opset.select(is_zero, zero, div)
+     return OpenVINOKerasTensor(result.output(0))


  def true_divide(x1, x2):
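Note: the `select` makes the quotient 0 wherever the denominator is 0, instead of inf or NaN. A NumPy sketch (not part of the diff) of the same semantics:

    import numpy as np

    x1 = np.array([3.0, 0.0, 6.0])
    x2 = np.array([2.0, 0.0, 0.0])
    with np.errstate(divide="ignore", invalid="ignore"):
        print(np.where(x2 == 0, 0.0, x1 / x2))  # [1.5 0.  0. ]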
keras/src/backend/tensorflow/numpy.py CHANGED
@@ -2145,6 +2145,27 @@ def nanmax(x, axis=None, keepdims=False):
      )


+ def nanmean(x, axis=None, keepdims=False):
+     x = convert_to_tensor(x)
+
+     if axis == () or axis == []:
+         return x
+
+     if not x.dtype.is_floating:
+         return tf.reduce_mean(
+             tf.cast(x, "float32"), axis=axis, keepdims=keepdims
+         )
+
+     dtype = dtypes.result_type(standardize_dtype(x.dtype), float)
+     total_sum = cast(nansum(x, axis=axis, keepdims=keepdims), dtype)
+     normalizer = tf.reduce_sum(
+         cast(~tf.math.is_nan(x), dtype),
+         axis=axis,
+         keepdims=keepdims,
+     )
+     return tf.divide(total_sum, normalizer)
+
+
  def nanmin(x, axis=None, keepdims=False):
      x = convert_to_tensor(x)

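Note: TensorFlow has no nanmean primitive, so the diff composes it as nansum divided by the count of non-NaN elements. A NumPy sketch (not part of the diff) of the identity:

    import numpy as np

    x = np.array([[1.0, np.nan, 3.0]])
    total = np.nansum(x, axis=1)           # [4.]
    count = np.sum(~np.isnan(x), axis=1)   # [2]
    print(total / count)                   # [2.] == np.nanmean(x, axis=1)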
keras/src/backend/torch/export.py CHANGED
@@ -36,15 +36,31 @@ class TorchExportArchive:
          )

          sample_inputs = tree.map_structure(
-             lambda x: convert_spec_to_tensor(x, replace_none_number=1),
+             lambda x: convert_spec_to_tensor(x, replace_none_number=2),
              input_signature,
          )
          sample_inputs = tuple(sample_inputs)

+         # Build dynamic_shapes from input_signature where the shape has
+         # None. Use a shared "batch" dim for dimension 0 across all inputs.
+         batch_dim = torch.export.Dim("batch", min=1)
+         dynamic_shapes = []
+         for spec in input_signature:
+             dim_spec = {}
+             for dim_idx, dim_val in enumerate(spec.shape):
+                 if dim_val is None:
+                     if dim_idx == 0:
+                         dim_spec[dim_idx] = batch_dim
+                     else:
+                         dim_spec[dim_idx] = torch.export.Dim(
+                             f"dim_{len(dynamic_shapes)}_{dim_idx}", min=1
+                         )
+             dynamic_shapes.append(dim_spec if dim_spec else None)
+         dynamic_shapes = tuple(dynamic_shapes) if any(dynamic_shapes) else None
+
          # Ref: torch_xla.tf_saved_model_integration
-         # TODO: Utilize `dynamic_shapes`
          exported = torch.export.export(
-             resource, sample_inputs, dynamic_shapes=None, strict=False
+             resource, sample_inputs, dynamic_shapes=dynamic_shapes, strict=False
          )
          options = torch_xla.stablehlo.StableHLOExportOptions(
              override_tracing_arguments=sample_inputs
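Note: `torch.export` takes one `dynamic_shapes` entry per positional input, each a dict mapping dimension index to a `torch.export.Dim`; reusing a single `Dim` object ties those dimensions together, which is what the shared batch dim above does. A minimal standalone sketch (not part of the diff), assuming a recent PyTorch with `torch.export`:

    import torch

    class TimesTwo(torch.nn.Module):
        def forward(self, x):
            return x * 2

    batch = torch.export.Dim("batch", min=1)
    ep = torch.export.export(
        TimesTwo(),
        (torch.randn(2, 3),),
        dynamic_shapes=({0: batch},),  # dim 0 is symbolic, dim 1 stays static
        strict=False,
    )
    print(ep.module()(torch.randn(5, 3)).shape)  # torch.Size([5, 3])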
keras/src/backend/torch/numpy.py CHANGED
@@ -1290,6 +1290,16 @@ def nanmax(x, axis=None, keepdims=False):
      )


+ def nanmean(x, axis=None, keepdims=False):
+     x = convert_to_tensor(x)
+
+     if axis == () or axis == []:
+         return x
+
+     dtype = dtypes.result_type(standardize_dtype(x.dtype), float)
+     return torch.nanmean(cast(x, dtype), dim=axis, keepdim=keepdims)
+
+
  def nanmin(x, axis=None, keepdims=False):
      x = convert_to_tensor(x)
      if not torch.is_floating_point(x):
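Note: `torch.nanmean` only accepts floating-point inputs, hence the cast to the promoted dtype above. A quick sketch (not part of the diff):

    import torch

    x = torch.tensor([[1.0, float("nan"), 3.0]])
    print(torch.nanmean(x, dim=1))              # tensor([2.])
    xi = torch.tensor([1, 2, 3])                # integer input must be cast
    print(torch.nanmean(xi.to(torch.float32)))  # tensor(2.)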
keras/src/layers/normalization/batch_normalization.py CHANGED
@@ -8,6 +8,26 @@ from keras.src.layers.input_spec import InputSpec
  from keras.src.layers.layer import Layer


+ # TODO(abheesht17): Move this to utils?
+ def _clone_initializer(initializer):
+     """Clones an initializer to ensure a new seed.
+
+     Args:
+         initializer: The initializer to clone.
+
+     Returns:
+         A cloned initializer if it is clonable, otherwise the original one.
+
+     As of TensorFlow 2.10, we need to clone user-passed initializers when
+     invoking them twice, to avoid creating the same randomized
+     initialization.
+     """
+     if isinstance(initializer, initializers.Initializer):
+         config = initializer.get_config()
+         return initializer.__class__.from_config(config)
+     # A string or dict identifier cannot (and need not) be cloned.
+     return initializer
+
+
  @keras_export("keras.layers.BatchNormalization")
  class BatchNormalization(Layer):
      """Layer that normalizes its inputs.
@@ -72,6 +92,22 @@ class BatchNormalization(Layer):
          variance) for the layer across all devices at each training step
          in a distributed training strategy.
          If `False`, each replica uses its own local batch statistics.
+     renorm: Whether to use
+         [Batch Renormalization](https://arxiv.org/abs/1702.03275). This
+         adds extra variables during training. Inference is the same
+         for either value of this parameter.
+     renorm_clipping: Dictionary, valid only if `renorm=True`.
+         Maps optional keys `"rmax"`, `"rmin"`, `"dmax"` to floats used to
+         clip the renorm correction. The correction `(r, d)` is used as
+         `corrected_value = normalized_value * r + d`, with `r` clipped to
+         `[rmin, rmax]`, and `d` to `[-dmax, dmax]`. Missing `rmax`, `rmin`,
+         `dmax` are set to `inf`, `0`, `inf`, respectively.
+     renorm_momentum: Momentum used to update the moving means and standard
+         deviations with renorm. Valid only if `renorm=True`. Unlike
+         `momentum`, this affects training and should be neither too small
+         (which would add noise) nor too large (which would give stale
+         estimates). Note that `momentum` is still applied to get the means
+         and variances for inference.
      **kwargs: Base layer keyword arguments (e.g. `name` and `dtype`).

  Call arguments:
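A minimal usage sketch of the new arguments (not part of the diff; assumes a build of Keras that includes this change):

    import numpy as np
    from keras import layers

    bn = layers.BatchNormalization(
        renorm=True,
        renorm_clipping={"rmax": 3.0, "rmin": 1.0 / 3.0, "dmax": 5.0},
        renorm_momentum=0.99,
    )
    x = np.random.rand(8, 4).astype("float32")
    y = bn(x, training=True)  # training path applies the clipped (r, d)
    print(y.shape)            # (8, 4)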
@@ -136,6 +172,9 @@ class BatchNormalization(Layer):
          gamma_regularizer=None,
          beta_constraint=None,
          gamma_constraint=None,
+         renorm=False,
+         renorm_clipping=None,
+         renorm_momentum=0.99,
          synchronized=False,
          **kwargs,
      ):
@@ -165,6 +204,33 @@ class BatchNormalization(Layer):
          self.gamma_constraint = constraints.get(gamma_constraint)
          self.supports_masking = True

+         self.renorm = renorm
+         if renorm:
+             renorm_clipping = renorm_clipping or {}
+             keys = ["rmax", "rmin", "dmax"]
+             if set(renorm_clipping) - set(keys):
+                 raise ValueError(
+                     "Received invalid keys for `renorm_clipping` argument: "
+                     f"{renorm_clipping}. Supported values: {keys}."
+                 )
+             rmax = renorm_clipping.get("rmax")
+             rmin = renorm_clipping.get("rmin")
+             dmax = renorm_clipping.get("dmax")
+
+             if rmax is not None and rmin is not None and rmax < rmin:
+                 raise ValueError(
+                     "rmax should be greater than rmin in the "
+                     f"`renorm_clipping` argument. Received: rmax={rmax}, "
+                     f"rmin={rmin}."
+                 )
+             if dmax is not None and dmax < 0:
+                 raise ValueError(
+                     "dmax should be non-negative in the `renorm_clipping` "
+                     f"argument. Received: dmax={dmax}."
+                 )
+
+         self.renorm_clipping = renorm_clipping
+         self.renorm_momentum = renorm_momentum
+
      self.gamma = None
      self.beta = None
      self.moving_mean = None
@@ -208,6 +274,40 @@ class BatchNormalization(Layer):
              autocast=False,
          )

+         if self.renorm:
+             # In batch renormalization we track the inference moving
+             # stddev instead of the moving variance, to more closely align
+             # with the paper. The stddev is initialized as the sqrt of the
+             # variance initializer.
+             def moving_stddev_initializer(shape, dtype=None):
+                 cloned = _clone_initializer(self.moving_variance_initializer)
+                 return ops.sqrt(cloned(shape, dtype=dtype))
+
+             self.moving_stddev = self.add_weight(
+                 shape=shape,
+                 name="moving_stddev",
+                 initializer=moving_stddev_initializer,
+                 trainable=False,
+                 autocast=False,
+             )
+             # Create variables to maintain the moving mean and standard
+             # deviation. These are used in training and thus are different
+             # from the moving averages above.
+             self.renorm_mean = self.add_weight(
+                 shape=shape,
+                 name="renorm_mean",
+                 initializer=_clone_initializer(self.moving_mean_initializer),
+                 trainable=False,
+                 autocast=False,
+             )
+             self.renorm_stddev = self.add_weight(
+                 shape=shape,
+                 name="renorm_stddev",
+                 initializer=moving_stddev_initializer,
+                 trainable=False,
+                 autocast=False,
+             )
+
          self.input_spec = InputSpec(
              ndim=len(input_shape), axes={self.axis: input_shape[self.axis]}
          )
@@ -250,20 +350,6 @@ class BatchNormalization(Layer):
          moving_mean = ops.cast(self.moving_mean, inputs.dtype)
          moving_variance = ops.cast(self.moving_variance, inputs.dtype)

-         if training and self.trainable:
-             mean, variance = self._moments(inputs, mask)
-
-             self.moving_mean.assign(
-                 moving_mean * self.momentum + mean * (1.0 - self.momentum)
-             )
-             self.moving_variance.assign(
-                 moving_variance * self.momentum
-                 + variance * (1.0 - self.momentum)
-             )
-         else:
-             mean = moving_mean
-             variance = moving_variance
-
          if self.scale:
              gamma = ops.cast(self.gamma, inputs.dtype)
          else:
@@ -274,6 +360,39 @@ class BatchNormalization(Layer):
          else:
              beta = None

+         if training and self.trainable:
+             mean, variance = self._moments(inputs, mask)
+
+             if self.renorm:
+                 # Compute renorm corrections (r and d).
+                 (
+                     r,
+                     d,
+                     mean,
+                     variance,
+                 ) = self._renorm_correction_and_moments(mean, variance)
+
+                 # Without renorm: x * gamma + beta. With renorm:
+                 # (x * r + d) * gamma + beta
+                 #     = x * (r * gamma) + (d * gamma + beta).
+                 gamma, beta = self._compose_transforms(
+                     r, d, gamma, beta, inputs.dtype
+                 )
+
+                 # Update moving statistics.
+                 self._update_renorm_statistics(mean, variance)
+             else:
+                 self.moving_mean.assign(
+                     moving_mean * self.momentum + mean * (1.0 - self.momentum)
+                 )
+                 self.moving_variance.assign(
+                     moving_variance * self.momentum
+                     + variance * (1.0 - self.momentum)
+                 )
+         else:
+             mean = moving_mean
+             variance = moving_variance
+
          outputs = ops.batch_normalization(
              x=inputs,
              mean=mean,
@@ -306,6 +425,9 @@ class BatchNormalization(Layer):
              "beta_constraint": constraints.serialize(self.beta_constraint),
              "gamma_constraint": constraints.serialize(self.gamma_constraint),
              "synchronized": self.synchronized,
+             "renorm": self.renorm,
+             "renorm_clipping": self.renorm_clipping,
+             "renorm_momentum": self.renorm_momentum,
          }
          return {**base_config, **config}

@@ -346,3 +468,115 @@ class BatchNormalization(Layer):
          variance = weighted_distsq / (sum_of_weights + backend.epsilon())

          return ops.squeeze(mean), ops.squeeze(variance)
+
+     def _renorm_correction_and_moments(self, mean, variance):
+         """Computes the correction for batch renormalization.
+
+         This method computes the r and d correction factors.
+
+         Args:
+             mean: The mean of the current batch.
+             variance: The variance of the current batch.
+
+         Returns:
+             A tuple (r, d, mean, variance) where r and d are the correction
+             factors, and mean/variance are passed through unchanged.
+         """
+         stddev = ops.sqrt(variance + self.epsilon)
+
+         # Get the renorm moving statistics.
+         renorm_mean = ops.cast(self.renorm_mean, mean.dtype)
+         # Avoid divide by zero early on in training.
+         renorm_stddev = ops.maximum(
+             ops.cast(self.renorm_stddev, mean.dtype),
+             ops.sqrt(ops.cast(self.epsilon, mean.dtype)),
+         )
+
+         # Compute the corrections for batch renorm.
+         r = ops.divide(stddev, renorm_stddev)
+         d = ops.divide(ops.subtract(mean, renorm_mean), renorm_stddev)
+
+         # Apply clipping.
+         rmin = self.renorm_clipping.get("rmin")
+         rmax = self.renorm_clipping.get("rmax")
+         dmax = self.renorm_clipping.get("dmax")
+
+         if rmin is not None:
+             r = ops.maximum(r, rmin)
+         if rmax is not None:
+             r = ops.minimum(r, rmax)
+         if dmax is not None:
+             d = ops.clip(d, -dmax, dmax)
+
+         return r, d, mean, variance
+
+     def _compose_transforms(self, r, d, gamma, beta, dtype):
+         """Composes the renorm correction with gamma and beta.
+
+         When training with renorm, the normalized values (x) are
+         transformed as:
+         (x * r + d) * gamma + beta = x * (r * gamma) + (d * gamma + beta).
+         This method computes the effective scale and offset.
+
+         Args:
+             r: The r correction factor.
+             d: The d correction factor.
+             gamma: The gamma (scale) parameter, or None.
+             beta: The beta (offset) parameter, or None.
+             dtype: The dtype for the output.
+
+         Returns:
+             A tuple (effective_gamma, effective_beta).
+         """
+         r = ops.stop_gradient(r)
+         d = ops.stop_gradient(d)
+
+         if gamma is not None:
+             effective_gamma = ops.multiply(r, gamma)
+             effective_beta = ops.multiply(d, gamma)
+         else:
+             effective_gamma = ops.cast(r, dtype)
+             effective_beta = ops.cast(d, dtype)
+
+         if beta is not None:
+             effective_beta = ops.add(effective_beta, beta)
+
+         return effective_gamma, effective_beta
+
+     def _update_renorm_statistics(self, mean, variance):
+         """Updates the renorm and moving statistics.
+
+         Args:
+             mean: The mean of the current batch.
+             variance: The variance of the current batch.
+         """
+         stddev = ops.sqrt(variance + self.epsilon)
+
+         # Update renorm moving mean and stddev.
+         renorm_mean = ops.cast(self.renorm_mean, mean.dtype)
+         renorm_stddev = ops.cast(self.renorm_stddev, mean.dtype)
+
+         self.renorm_mean.assign(
+             renorm_mean * self.renorm_momentum
+             + mean * (1.0 - self.renorm_momentum)
+         )
+         self.renorm_stddev.assign(
+             renorm_stddev * self.renorm_momentum
+             + stddev * (1.0 - self.renorm_momentum)
+         )
+
+         moving_mean = ops.cast(self.moving_mean, mean.dtype)
+         moving_stddev = ops.cast(self.moving_stddev, mean.dtype)
+
+         self.moving_mean.assign(
+             moving_mean * self.momentum + mean * (1.0 - self.momentum)
+         )
+
+         new_moving_stddev = moving_stddev * self.momentum + stddev * (
+             1.0 - self.momentum
+         )
+         self.moving_stddev.assign(new_moving_stddev)
+
+         # Derive `moving_variance` from `moving_stddev`, applying ReLU in
+         # case floating point rounding causes it to go negative.
+         self.moving_variance.assign(
+             ops.relu(new_moving_stddev * new_moving_stddev - self.epsilon)
+         )
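Note: the correction folds into gamma and beta because affine transforms compose. A NumPy sketch (not part of the diff) checking the identity the comments rely on:

    import numpy as np

    rng = np.random.default_rng(0)
    x_hat, r, d = rng.normal(size=3)
    gamma, beta = 1.5, 0.25
    lhs = (x_hat * r + d) * gamma + beta            # correction, then affine
    rhs = x_hat * (r * gamma) + (d * gamma + beta)  # folded form
    assert np.isclose(lhs, rhs)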
keras/src/ops/numpy.py CHANGED
@@ -5121,6 +5121,60 @@ def nanmax(x, axis=None, keepdims=False):
      return backend.numpy.nanmax(x, axis=axis, keepdims=keepdims)


+ class Nanmean(Operation):
+     def __init__(self, axis=None, keepdims=False, *, name=None):
+         super().__init__(name=name)
+         self.axis = axis
+         self.keepdims = keepdims
+
+     def call(self, x):
+         return backend.numpy.nanmean(x, axis=self.axis, keepdims=self.keepdims)
+
+     def compute_output_spec(self, x):
+         dtype = dtypes.result_type(x.dtype, float)
+         return KerasTensor(
+             reduce_shape(x.shape, axis=self.axis, keepdims=self.keepdims),
+             dtype=dtype,
+         )
+
+
+ @keras_export(["keras.ops.nanmean", "keras.ops.numpy.nanmean"])
+ def nanmean(x, axis=None, keepdims=False):
+     """Mean of a tensor over the given axes, ignoring NaNs.
+
+     Args:
+         x: Input tensor.
+         axis: Axis or axes along which the mean is computed.
+             The default is to compute the mean of the flattened tensor.
+         keepdims: If this is set to `True`, the axes which are reduced are
+             left in the result as dimensions with size one. Defaults
+             to `False`.
+
+     Returns:
+         Output tensor containing the mean, with NaN values ignored.
+         If all values along a reduced axis are NaN, the result is NaN.
+
+     Examples:
+     >>> import numpy as np
+     >>> from keras import ops
+     >>> x = np.array([[1.0, np.nan, 3.0],
+     ...               [np.nan, 2.0, 1.0]])
+     >>> ops.nanmean(x)
+     1.75
+
+     >>> ops.nanmean(x, axis=1)
+     array([2., 1.5])
+
+     >>> ops.nanmean(x, axis=1, keepdims=True)
+     array([[2. ],
+            [1.5]])
+     """
+     if any_symbolic_tensors((x,)):
+         return Nanmean(axis=axis, keepdims=keepdims).symbolic_call(x)
+
+     return backend.numpy.nanmean(x, axis=axis, keepdims=keepdims)
+
+
  class Nanmin(Operation):
      def __init__(self, axis=None, keepdims=False, *, name=None):
          super().__init__(name=name)
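A minimal sketch (not part of the diff) of the symbolic path, where `compute_output_spec` infers the reduced shape and promoted dtype without calling a backend kernel:

    from keras import KerasTensor, ops

    x = KerasTensor(shape=(None, 8), dtype="float32")
    y = ops.nanmean(x, axis=1, keepdims=True)
    print(y.shape, y.dtype)  # (None, 1) float32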
keras/src/saving/file_editor.py CHANGED
@@ -509,9 +509,15 @@ class KerasFileEditor:
              # ------------------------------------------------------

              # Skip any objects that are not proper datasets
-             if not hasattr(value, "shape") or not hasattr(value, "dtype"):
+             if not isinstance(value, h5py.Dataset):
                  continue

+             if value.external:
+                 raise ValueError(
+                     "Not allowed: H5 file Dataset with external links: "
+                     f"{value.external}"
+                 )
+
              shape = value.shape
              dtype = value.dtype

keras/src/saving/saving_lib.py CHANGED
@@ -796,7 +796,8 @@ def _load_state(
          try:
              saveable.load_own_variables(weights_store.get(inner_path))
          except Exception as e:
-             failed_saveables.add(id(saveable))
+             if failed_saveables is not None:
+                 failed_saveables.add(id(saveable))
              error_msgs[id(saveable)] = saveable, e
              failure = True
          else:
@@ -807,7 +808,8 @@ def _load_state(
          try:
              saveable.load_assets(assets_store.get(inner_path))
          except Exception as e:
-             failed_saveables.add(id(saveable))
+             if failed_saveables is not None:
+                 failed_saveables.add(id(saveable))
              error_msgs[id(saveable)] = saveable, e
              failure = True
          else:
@@ -855,7 +857,7 @@ def _load_state(
      if not failure:
          if visited_saveables is not None and newly_failed <= 0:
              visited_saveables.add(id(saveable))
-         if id(saveable) in failed_saveables:
+         if failed_saveables is not None and id(saveable) in failed_saveables:
              failed_saveables.remove(id(saveable))
              error_msgs.pop(id(saveable))

@@ -1035,6 +1037,25 @@ class H5IOStore:
          # will mistakenly use `__len__` to determine the value.
          return self.h5_file.__bool__()

+     def _verify_group(self, group):
+         if not isinstance(group, h5py.Group):
+             raise ValueError(
+                 f"Invalid H5 file, expected Group but received {type(group)}"
+             )
+         return group
+
+     def _verify_dataset(self, dataset):
+         if not isinstance(dataset, h5py.Dataset):
+             raise ValueError(
+                 f"Invalid H5 file, expected Dataset, received {type(dataset)}"
+             )
+         if dataset.external:
+             raise ValueError(
+                 "Not allowed: H5 file Dataset with external links: "
+                 f"{dataset.external}"
+             )
+         return dataset
+
      def _get_h5_file(self, path_or_io, mode=None):
          mode = mode or self.mode
          if mode not in ("r", "w", "a"):
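Note: in h5py, `Dataset.external` lists `(filename, offset, size)` tuples when a dataset's raw storage lives outside the HDF5 file, and is falsy otherwise; reading such a dataset would pull bytes from an arbitrary on-disk path, hence the rejection. A sketch (not part of the diff) of how such a dataset is created:

    import h5py

    with h5py.File("demo.h5", "w") as f:
        # Raw storage for "x" points at a separate file on disk.
        f.create_dataset(
            "x", shape=(4,), dtype="f4", external=[("payload.bin", 0, 16)]
        )

    with h5py.File("demo.h5", "r") as f:
        print(f["x"].external)  # [('payload.bin', 0, 16)]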
@@ -1094,15 +1115,19 @@ class H5IOStore:
          self._h5_entry_group = {}  # Defaults to an empty dict if not found.
          if not path:
              if "vars" in self.h5_file:
-                 self._h5_entry_group = self.h5_file["vars"]
+                 self._h5_entry_group = self._verify_group(self.h5_file["vars"])
          elif path in self.h5_file and "vars" in self.h5_file[path]:
-             self._h5_entry_group = self.h5_file[path]["vars"]
+             self._h5_entry_group = self._verify_group(
+                 self._verify_group(self.h5_file[path])["vars"]
+             )
          else:
              # No hit. Fix for 2.13 compatibility.
              if "_layer_checkpoint_dependencies" in self.h5_file:
                  path = path.replace("layers", "_layer_checkpoint_dependencies")
                  if path in self.h5_file and "vars" in self.h5_file[path]:
-                     self._h5_entry_group = self.h5_file[path]["vars"]
+                     self._h5_entry_group = self._verify_group(
+                         self._verify_group(self.h5_file[path])["vars"]
+                     )
          self._h5_entry_initialized = True
          return self

@@ -1134,25 +1159,15 @@ class H5IOStore:
      def keys(self):
          return self._h5_entry_group.keys()

-     def items(self):
-         return self._h5_entry_group.items()
-
-     def values(self):
-         return self._h5_entry_group.values()
-
      def __getitem__(self, key):
-         value = self._h5_entry_group[key]
+         value = self._verify_dataset(self._h5_entry_group[key])
          if (
              hasattr(value, "attrs")
              and "dtype" in value.attrs
              and value.attrs["dtype"] == "bfloat16"
          ):
              value = np.array(value, dtype=ml_dtypes.bfloat16)
-         elif (
-             hasattr(value, "shape")
-             and hasattr(value, "dtype")
-             and not isinstance(value, np.ndarray)
-         ):
+         elif not isinstance(value, np.ndarray):
              value = np.array(value)
          return value

@@ -1355,15 +1370,13 @@ class ShardedH5IOStore(H5IOStore):
          self._get_h5_group(self._h5_entry_path)

      def _restore_h5_file(self):
-         """Ensure the current shard is the last one created.
-
-         We use mode="a" to avoid truncating the file during the switching.
-         """
+         """Ensure the current shard is the last one created."""
          if (
              pathlib.Path(self.h5_file.filename).name
              != self.current_shard_path.name
          ):
-             self._switch_h5_file(self.current_shard_path.name, mode="a")
+             mode = "a" if self.mode == "w" else "r"
+             self._switch_h5_file(self.current_shard_path.name, mode=mode)

      # H5 entry level methods.

@@ -1371,9 +1384,11 @@ class ShardedH5IOStore(H5IOStore):
          """Get the H5 entry group. If it doesn't exist, return an empty dict."""
          try:
              if not path:
-                 self._h5_entry_group = self.h5_file["vars"]
+                 self._h5_entry_group = self._verify_group(self.h5_file["vars"])
              else:
-                 self._h5_entry_group = self.h5_file[path]["vars"]
+                 self._h5_entry_group = self._verify_group(
+                     self._verify_group(self.h5_file[path])["vars"]
+                 )
              self._h5_entry_initialized = True
          except KeyError:
              self._h5_entry_group = {}
@@ -1392,33 +1407,17 @@ class ShardedH5IOStore(H5IOStore):
          return total_len

      def keys(self):
-         keys = set(self._h5_entry_group.keys())
+         keys = []
+         current_shard_keys = list(self._h5_entry_group.keys())
          for filename in self.current_shard_filenames:
              if filename == self.current_shard_path.name:
-                 continue
-             self._switch_h5_file(filename, mode="r")
-             keys.update(self._h5_entry_group.keys())
+                 keys += current_shard_keys
+             else:
+                 self._switch_h5_file(filename, mode="r")
+                 keys += list(self._h5_entry_group.keys())
          self._restore_h5_file()
          return keys

-     def items(self):
-         yield from self._h5_entry_group.items()
-         for filename in self.current_shard_filenames:
-             if filename == self.current_shard_path.name:
-                 continue
-             self._switch_h5_file(filename, mode="r")
-             yield from self._h5_entry_group.items()
-         self._restore_h5_file()
-
-     def values(self):
-         yield from self._h5_entry_group.values()
-         for filename in self.current_shard_filenames:
-             if filename == self.current_shard_path.name:
-                 continue
-             self._switch_h5_file(filename, mode="r")
-             yield from self._h5_entry_group.values()
-         self._restore_h5_file()
-
      def __getitem__(self, key):
          if key in self._h5_entry_group:
              return super().__getitem__(key)
keras/src/version.py CHANGED
@@ -1,7 +1,7 @@
  from keras.src.api_export import keras_export

  # Unique source of truth for the version number.
- __version__ = "3.14.0.dev2026012604"
+ __version__ = "3.14.0.dev2026012804"


  @keras_export("keras.version")
keras_nightly-3.14.0.dev2026012804.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: keras-nightly
- Version: 3.14.0.dev2026012604
+ Version: 3.14.0.dev2026012804
  Summary: Multi-backend Keras
  Author-email: Keras team <keras-users@googlegroups.com>
  License: Apache License 2.0
keras_nightly-3.14.0.dev2026012804.dist-info/RECORD CHANGED
@@ -45,11 +45,11 @@ keras/_tf_keras/keras/losses/__init__.py,sha256=xBc_KOtSLwp3h3CKQ0EnCuIy-Bsak2SP
  keras/_tf_keras/keras/metrics/__init__.py,sha256=_wF31PTvua5ahF9JEW4Hx1UVNjVCLqVI8J5JNrZCBf8,6546
  keras/_tf_keras/keras/mixed_precision/__init__.py,sha256=AM51CzHqzcY75tqdpQiuVcTRUEpUzBqeb-EfLeSDSV8,727
  keras/_tf_keras/keras/models/__init__.py,sha256=83pyA0pzytqin8JLV6FEbPreCb-V64ToebxFGrHsVdQ,501
- keras/_tf_keras/keras/ops/__init__.py,sha256=j1hmnwcCXYOBqSaKHODKVyBwwS6N38KfC-W8_t9_GmU,15716
+ keras/_tf_keras/keras/ops/__init__.py,sha256=q6oC17b27vXFUPbuWYjiA4_dVvXeyZwz0LBfOK2vVOQ,15767
  keras/_tf_keras/keras/ops/image/__init__.py,sha256=oM_PLh5Jk9OGfi1bbJcfWkjoq0Ye5JQG9a7v_KzDfoc,1034
  keras/_tf_keras/keras/ops/linalg/__init__.py,sha256=0ab6icK3yuIm4khSfAksGRFLEAJhaOu6gGgarau4iEQ,822
  keras/_tf_keras/keras/ops/nn/__init__.py,sha256=2eD8IlkfBrsmJjHpzsxMM3_058oGeZVgohdBd27iDnI,2992
- keras/_tf_keras/keras/ops/numpy/__init__.py,sha256=EEX_wcRRiw4E4Tq8Bm-n5AVi-OotP8FMdLRni12oEZQ,9778
+ keras/_tf_keras/keras/ops/numpy/__init__.py,sha256=Ap9vHiPPbJTmGIXYBsq1-mxHqWP_lQDPgJu6u1M2CJo,9829
  keras/_tf_keras/keras/optimizers/__init__.py,sha256=1fx0vEB-oGu-9dumxoIvX4qVHdgJvf74OLyYoBkE2y0,1267
  keras/_tf_keras/keras/optimizers/legacy/__init__.py,sha256=uIMQESCV80Q0FY-9ikQUjXYPyZqmTfAM3dfohQ5DzYs,516
  keras/_tf_keras/keras/optimizers/schedules/__init__.py,sha256=pQF3rQiAPuUSTUdflTr-fpL77oyGIv9xzGdjae3M3kw,1120
@@ -111,11 +111,11 @@ keras/losses/__init__.py,sha256=VIXBHQFNdLUPZ7JuwtIKj_4E-xf2yvNyrmdklvjr_xM,3667
  keras/metrics/__init__.py,sha256=qeEwtqpSCAaCr8BMUv1eVaqJl2Zb83OB5K0BG3JB0nI,6245
  keras/mixed_precision/__init__.py,sha256=AM51CzHqzcY75tqdpQiuVcTRUEpUzBqeb-EfLeSDSV8,727
  keras/models/__init__.py,sha256=83pyA0pzytqin8JLV6FEbPreCb-V64ToebxFGrHsVdQ,501
- keras/ops/__init__.py,sha256=j1hmnwcCXYOBqSaKHODKVyBwwS6N38KfC-W8_t9_GmU,15716
+ keras/ops/__init__.py,sha256=q6oC17b27vXFUPbuWYjiA4_dVvXeyZwz0LBfOK2vVOQ,15767
  keras/ops/image/__init__.py,sha256=oM_PLh5Jk9OGfi1bbJcfWkjoq0Ye5JQG9a7v_KzDfoc,1034
  keras/ops/linalg/__init__.py,sha256=0ab6icK3yuIm4khSfAksGRFLEAJhaOu6gGgarau4iEQ,822
  keras/ops/nn/__init__.py,sha256=2eD8IlkfBrsmJjHpzsxMM3_058oGeZVgohdBd27iDnI,2992
- keras/ops/numpy/__init__.py,sha256=EEX_wcRRiw4E4Tq8Bm-n5AVi-OotP8FMdLRni12oEZQ,9778
+ keras/ops/numpy/__init__.py,sha256=Ap9vHiPPbJTmGIXYBsq1-mxHqWP_lQDPgJu6u1M2CJo,9829
  keras/optimizers/__init__.py,sha256=1fx0vEB-oGu-9dumxoIvX4qVHdgJvf74OLyYoBkE2y0,1267
  keras/optimizers/legacy/__init__.py,sha256=uIMQESCV80Q0FY-9ikQUjXYPyZqmTfAM3dfohQ5DzYs,516
  keras/optimizers/schedules/__init__.py,sha256=pQF3rQiAPuUSTUdflTr-fpL77oyGIv9xzGdjae3M3kw,1120
@@ -128,7 +128,7 @@ keras/regularizers/__init__.py,sha256=542Shphw7W8h4Dyf2rmqMKUECVZ8IVBvN9g1LWhz-b
  keras/saving/__init__.py,sha256=KvL2GZxjvgFgEhvEnkvqjIR9JSNHKz-NWZacXajsjLI,1298
  keras/src/__init__.py,sha256=Gi4S7EiCMkE03PbdGNpFdaUYySWDs_FcAJ8Taz9Y1BE,684
  keras/src/api_export.py,sha256=gXOkBOnmscV013WAc75lc4Up01-Kkg9EylIAT_QWctg,1173
- keras/src/version.py,sha256=IO1BtTEdIsNKjrOQPKMlR2VK-1cLzdW1S-Ep5EqpM-g,204
+ keras/src/version.py,sha256=19dG5h6eOc-VKk0odlobQO7lPSyHV89NVu3r2Y6bzco,204
  keras/src/activations/__init__.py,sha256=0nL3IFDB9unlrMz8ninKOWo-uCHasTUpTo1tXZb2u44,4433
  keras/src/activations/activations.py,sha256=mogPggtp4CGldI3VOPNmesRxp6EbiR1_i4KLGaVwzL8,17614
  keras/src/applications/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -171,7 +171,7 @@ keras/src/backend/jax/layer.py,sha256=o6CicT06udwamTRQIjNSDLZLyYHFzBXNbxewXgWe0i
  keras/src/backend/jax/linalg.py,sha256=LDaLZYz49ChE2kJR3YpaM9xuwusvd3krV7nNAAazTWA,2642
  keras/src/backend/jax/math.py,sha256=1IEDpdoF8e5ltu3D4wbDQuihzvJHhMXz8W9Z_E-eJqU,9391
  keras/src/backend/jax/nn.py,sha256=mrRawNvf9EWe8rdTwK_Auz6xdLkVG6hH0nIAP7hyUDE,60271
- keras/src/backend/jax/numpy.py,sha256=XMBWC4MM3eOCm2tePDT1J-Bj8h6SH0VWvArGGe0-2X8,39125
+ keras/src/backend/jax/numpy.py,sha256=9d3zfOWJYO-BaIWXpUi-etaRkwbrb3wzFfjWPOoV9pI,39255
  keras/src/backend/jax/optimizer.py,sha256=5DeXQHcYmUI6F9i1m1VHn3sBt4LEStOeBXnKdESevLM,4134
  keras/src/backend/jax/random.py,sha256=Uk2huGIk_dlzMrx5eDVrrr2TeCEMitn2vr4yzA0NXjs,3594
  keras/src/backend/jax/rnn.py,sha256=Ycq0qfLY4M4jhltvztpLQyywjEM17T7CZQFh4hhHOUE,7767
@@ -186,7 +186,7 @@ keras/src/backend/numpy/layer.py,sha256=dTk7W7ql7vRgll7JbOXK5PlIhQw5VHdpSjKciHd8
  keras/src/backend/numpy/linalg.py,sha256=uzLTxEyuX_gDcnoA5Q59GdTg33py0WooKK5T6T9Td6c,2543
  keras/src/backend/numpy/math.py,sha256=HdkEA5ro7dtQBTP78GFIgqTFLgNQ49PXHhqI1vLRGfo,10169
  keras/src/backend/numpy/nn.py,sha256=P9JAnTlwSTI7bVv8WIv1pDQJHpjML_WJ0RsJWy-LJMc,46200
- keras/src/backend/numpy/numpy.py,sha256=UlqAexse7-DskEkQWw5mW2mXI-vBo6NO3jAoPwMzMM4,38266
+ keras/src/backend/numpy/numpy.py,sha256=FbT39wJdKezY_rHGTQEOTUTnkpn-VGWV6fMDI83vliU,38446
  keras/src/backend/numpy/random.py,sha256=wx2nE75q7L2cBMjtQlQx8yKMj4Ie3puFMDQsbrZO8SA,3961
  keras/src/backend/numpy/rnn.py,sha256=thOsMung1qR3lQsR4_D6hqKMFollQgrB0KwsJLk4BMY,7867
  keras/src/backend/numpy/trainer.py,sha256=MzWr8_LLHa1P6fxdUWirGw_lQwHGF_vkZ7RUGLUzjUs,11126
@@ -198,7 +198,7 @@ keras/src/backend/openvino/layer.py,sha256=5RdvaH1yOyPAphjKiuQAK1H_yZFYKE1Hp7c5b
  keras/src/backend/openvino/linalg.py,sha256=L6a4MFGND2wWzPVCh44cwuOgkcC4wJTo8Xy3HwW04lg,1614
  keras/src/backend/openvino/math.py,sha256=qw9kX2sJ2qr0dBJF12Ey0E2GcwixPUqoev6UcNra4NI,3944
  keras/src/backend/openvino/nn.py,sha256=zULPxdwVO7JDZUUtsuoEEPCLQ09ew8z8T6G_i_NEqrM,23741
- keras/src/backend/openvino/numpy.py,sha256=qmw4X3u0oJZptzqNHM33FPjTd_Ku6xY9dxxYFqW8c0g,109966
+ keras/src/backend/openvino/numpy.py,sha256=KzwfF8A1RPvmZCiZZqM0B5XELQNfyCYMAnb60Cxn2Jw,111780
  keras/src/backend/openvino/random.py,sha256=4hRUtIP6qJxO3Qy9uH1x6jSuJna3nWPdUf4x2QU8-ew,5575
  keras/src/backend/openvino/rnn.py,sha256=ErmuZLPSgG9qU-NfYPPvBZ6Ysy8k-fA4g19Vhqq7OVQ,866
  keras/src/backend/openvino/trainer.py,sha256=bMmtSALqydqdS6ke-5sYW5fgxZDshDH810p_C0xCRTg,9087
@@ -211,7 +211,7 @@ keras/src/backend/tensorflow/layer.py,sha256=69d40LwL4HhKRsCjj1VRpjfrQXXF8VV3vh0
  keras/src/backend/tensorflow/linalg.py,sha256=_lZVfdY1tFvrN7xwbt3INGoTR0yC5v-kI1Q0XppVibY,8773
  keras/src/backend/tensorflow/math.py,sha256=zTu_7Ff6B2Ro862z_xH0OCmIWbV74DjsO5UnfjYuOUQ,12370
  keras/src/backend/tensorflow/nn.py,sha256=6vtZHzUED6_blUPE1Tnc3GAxPpJ2ebxoaiMn80tTL9k,51328
- keras/src/backend/tensorflow/numpy.py,sha256=NHaXiWpFYVIUrZDTz3XaUKcN1AYsI9PGnlJmksOIOY8,106315
+ keras/src/backend/tensorflow/numpy.py,sha256=8yiH61_ML4JRkZIYVysVnH3XdVaUVIWG1NG8dNrONWc,106883
  keras/src/backend/tensorflow/optimizer.py,sha256=kFlyEOnGjEYdLpd8mpwhUeku78__xBfZbbrDWpJrq60,9307
  keras/src/backend/tensorflow/random.py,sha256=iO8V_soaDXZm9ewyAVbjudhsMj08C348c9Bz64nxXC4,6475
  keras/src/backend/tensorflow/rnn.py,sha256=JbOSpt48cm612c7YwiTYOQCQsNXyI_6QeRhtUn8qEvM,34829
@@ -221,13 +221,13 @@ keras/src/backend/tensorflow/trackable.py,sha256=QZn0JvpBJ7Kx4e6zM2IVIWz9ADcWDB-
  keras/src/backend/tensorflow/trainer.py,sha256=otJ4CciSmE2-QH17bEyjaL-oQxiCD5eWgbkEglxjt4w,37191
  keras/src/backend/torch/__init__.py,sha256=0SiJ91WMaE_tO5q1zUsLEnU6hmPTpGKPIOkmIWaHlhk,2131
  keras/src/backend/torch/core.py,sha256=cMo5dCDxWicWgezOargrq1qx-tlS1GJzzYXm--PS1l0,24506
- keras/src/backend/torch/export.py,sha256=yYc5-4JxSiaCkbFWpfCIdcm4dDBv_9uG_uH6JR6oGx0,4909
+ keras/src/backend/torch/export.py,sha256=9efHPdN_xvSWB0pndr_X9JJmQNwWyvrGBQOr_xOQDHc,5685
  keras/src/backend/torch/image.py,sha256=eer8LZwDMz3k2Dh5gxeTQfwrxPemM_H6eHMIP3AwRss,39149
  keras/src/backend/torch/layer.py,sha256=htECdpv9ioHWM8_zqQkEdxgDsgLu8XJi5yXgnLl-JFw,2084
  keras/src/backend/torch/linalg.py,sha256=wgPCfnscp5HOBmX9_-m-57lzxs1ttLNzmHqj2VYYq7k,2108
  keras/src/backend/torch/math.py,sha256=g-ElDii2Y_o1-t6BAu2nbS7JH-aPqVS5Fqds8aYzIlg,14324
  keras/src/backend/torch/nn.py,sha256=80MdDzkN7wV3MJbNsGh9B8IkdBoXC36wQcV8_o13y-8,37688
- keras/src/backend/torch/numpy.py,sha256=19zGj9Yr98Aejtv_GLiqeDQjAd_MrBedpWTqGed_gYw,59093
+ keras/src/backend/torch/numpy.py,sha256=j686g4KwZjQqxH3ww2ioZrqZVyC8SeqKGRUuMOjWC5I,59354
  keras/src/backend/torch/random.py,sha256=YhLfC7qkGpzlU_i6gGPVormo3BMSo7OUA3TC3GCehrA,8292
  keras/src/backend/torch/rnn.py,sha256=MJIVbHKsUA2dZm4Gu2NvRxlrFCWeWSxSZRmFxSsC3Zg,26041
  keras/src/backend/torch/trainer.py,sha256=dcikz1c5O0FHNzRKSi6WhIHsHfLV2HDlrXPElSd1cgE,17985
@@ -344,7 +344,7 @@ keras/src/layers/merging/minimum.py,sha256=f8RN1O5yYzDqJbXuVTBKC0TKdEw_VU4bC4pZX
  keras/src/layers/merging/multiply.py,sha256=WvBX5gOpouqfQYnpioKMw2Tj6HRQQ2LNBuvKsRo_6P0,3185
  keras/src/layers/merging/subtract.py,sha256=ijpJDomo1JSMCw97Rn55LXiVLsI50lcvUxmZiv_HIzo,2684
  keras/src/layers/normalization/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- keras/src/layers/normalization/batch_normalization.py,sha256=Hov8hKKqAOl1TDRtmH0S6jn8iutjdcVlw_Q_EcElDBc,14138
+ keras/src/layers/normalization/batch_normalization.py,sha256=RXvCB4Nq0I9c5qPtL9UlhuOaomfsjBtrs7B-sAUYZhg,23453
  keras/src/layers/normalization/group_normalization.py,sha256=S8w40kMCi_aEN079vwDPxaV7K02Ny0HocZJ1ATX4SpA,9367
  keras/src/layers/normalization/layer_normalization.py,sha256=4GHBLQf2BSRLv2-73rPRWTgLKwYU7V0wXKZ99dA2jhw,8622
  keras/src/layers/normalization/rms_normalization.py,sha256=IJMNWEg41ELWWd_V5PfUZaseB2lIKKpE0KC-M-T4INY,3008
@@ -504,7 +504,7 @@ keras/src/ops/linalg.py,sha256=3V8S_cgNxZZCIFcFj-FBHTdRqWNbimDtumMvfoc0f30,26736
  keras/src/ops/math.py,sha256=4qYMJ5qAPmeSyeF63YWoGbUkQt6f4_VX0enOChU4mXU,37233
  keras/src/ops/nn.py,sha256=04gjHB2BWusy4tWm59EO5Ns1paJC5umDNGwNCKzaJWQ,104658
  keras/src/ops/node.py,sha256=aJgn9D-GkteE--Bbt2cZ9JjVxb2W2uS1OWEKoeLsl3Y,5583
- keras/src/ops/numpy.py,sha256=1rIBEmWGxdHUSoSjy-JRaC-GAHPCa-MZ50bfoIdUvRE,262690
+ keras/src/ops/numpy.py,sha256=VmjIuyjZYDu37AO0PaJ6fpw8MnVKcFUJ5IDx_nnPxps,264374
  keras/src/ops/operation.py,sha256=A7sh9Hi6kZb7wkeMmhrDQIq770ofANXuP-Qg-kwCM3o,15485
  keras/src/ops/operation_utils.py,sha256=C6eThl-haKzlDH0fC1rn5-P1P-pCfIfXs-fy-ADR534,14523
  keras/src/ops/symbolic_arguments.py,sha256=MKwXxZYkyouD9BPmQ1uUNxILdcwPvTayAqXaUV3P3o4,1628
@@ -543,12 +543,12 @@ keras/src/random/seed_generator.py,sha256=-a0CQa7--Xt0g0nfdjLmUzlFElY9Y838VcCx05
  keras/src/regularizers/__init__.py,sha256=GzK9FTKL2Xxd5H55GfG9gxDqt4eZoVHFWICgb2VW8qM,1731
  keras/src/regularizers/regularizers.py,sha256=MDtsiFjLgI1sl9z036XcQhZH9OnUmMHM74l27dspum0,11802
  keras/src/saving/__init__.py,sha256=vnrtfvnzW7Gwtxe5COhaMoEnVYB5iDe2YlqJ-DvqFIk,614
- keras/src/saving/file_editor.py,sha256=tsUo9mQbMa8433tHTnOKWFhDeathYwDb0CeWcDTTTBQ,32089
+ keras/src/saving/file_editor.py,sha256=mNlXakl58wPC7nWRAVTh-esQ0UN2x-q5piI2ZXYQ80k,32263
  keras/src/saving/keras_saveable.py,sha256=aGIt1ajtsaamfUq18LM6ql8JEoQzi3HwzJEuwQ9bmKE,1285
  keras/src/saving/object_registration.py,sha256=OOO-7-SNfPoFkFsR_c5jzE6aSIDIlHlnMcm9IlI_Gbs,7357
  keras/src/saving/orbax_util.py,sha256=ArJI9hQODUyyvzCiXt8AS3VH6E4SL0vF02-RHBk30gU,1621
  keras/src/saving/saving_api.py,sha256=PMkxXhtNNKX8GlwIsCP8-Plt19M012wNEk7i8BhxWzo,12670
- keras/src/saving/saving_lib.py,sha256=-uSXsojqzSl19FtW5FogCclvnu_nnVU3S-Si293DNq0,58723
+ keras/src/saving/saving_lib.py,sha256=bRI8TeNOlflTfX3njSkkwNv-VYip-OW7ienIm0lL96I,58920
  keras/src/saving/serialization_lib.py,sha256=yzCTm8hin__MGA2N5M5F-8Zbts5ZJVmINbrH4wEtIwI,30334
  keras/src/testing/__init__.py,sha256=7vVsV7Rn3rG99DdURgnH8ncpxagRwIE0uhH-R4qDyok,315
  keras/src/testing/test_case.py,sha256=ZisRWfnbKiRdAee59wsq9PsDJGsAMGQKxMtELyERok0,31852
@@ -618,7 +618,7 @@ keras/utils/bounding_boxes/__init__.py,sha256=jtvQll4u8ZY0Z96HwNhP1nxWEG9FM3gI-6
  keras/utils/legacy/__init__.py,sha256=oSYZz6uS8UxSElRaaJYWJEoweJ4GAasZjnn7fNaOlog,342
  keras/visualization/__init__.py,sha256=UKWmiy6sps4SWlmQi9WX8_Z53cPpLlphz2zIeHdwJpQ,722
  keras/wrappers/__init__.py,sha256=QkS-O5K8qGS7C3sytF8MpmO6PasATpNVGF8qtb7Ojsw,407
- keras_nightly-3.14.0.dev2026012604.dist-info/METADATA,sha256=hoMrMEDlNy-ToYdBIMoJjk-t3RxmNa9C_cq04cnPQcc,6339
- keras_nightly-3.14.0.dev2026012604.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
- keras_nightly-3.14.0.dev2026012604.dist-info/top_level.txt,sha256=ptcw_-QuGZ4ZDjMdwi_Z0clZm8QAqFdvzzFnDEOTs9o,6
- keras_nightly-3.14.0.dev2026012604.dist-info/RECORD,,
+ keras_nightly-3.14.0.dev2026012804.dist-info/METADATA,sha256=PHIKEROkWa1oJK381wl7eaROrHFpk2-e2N8yNGMRLfI,6339
+ keras_nightly-3.14.0.dev2026012804.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+ keras_nightly-3.14.0.dev2026012804.dist-info/top_level.txt,sha256=ptcw_-QuGZ4ZDjMdwi_Z0clZm8QAqFdvzzFnDEOTs9o,6
+ keras_nightly-3.14.0.dev2026012804.dist-info/RECORD,,