keras-nightly 3.14.0.dev2026012204__py3-none-any.whl → 3.14.0.dev2026012404__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. keras/_tf_keras/keras/ops/__init__.py +2 -0
  2. keras/_tf_keras/keras/ops/numpy/__init__.py +2 -0
  3. keras/ops/__init__.py +2 -0
  4. keras/ops/numpy/__init__.py +2 -0
  5. keras/src/backend/jax/numpy.py +10 -0
  6. keras/src/backend/numpy/numpy.py +8 -0
  7. keras/src/backend/openvino/numpy.py +41 -2
  8. keras/src/backend/tensorflow/numpy.py +40 -0
  9. keras/src/backend/torch/core.py +4 -1
  10. keras/src/backend/torch/nn.py +7 -4
  11. keras/src/backend/torch/numpy.py +36 -0
  12. keras/src/layers/layer.py +10 -1
  13. keras/src/layers/preprocessing/image_preprocessing/aug_mix.py +13 -0
  14. keras/src/layers/preprocessing/image_preprocessing/base_image_preprocessing_layer.py +58 -0
  15. keras/src/layers/preprocessing/image_preprocessing/cut_mix.py +13 -0
  16. keras/src/layers/preprocessing/image_preprocessing/max_num_bounding_box.py +23 -0
  17. keras/src/layers/preprocessing/image_preprocessing/rand_augment.py +15 -0
  18. keras/src/layers/preprocessing/image_preprocessing/random_color_degeneration.py +15 -0
  19. keras/src/layers/preprocessing/image_preprocessing/random_color_jitter.py +15 -0
  20. keras/src/layers/preprocessing/image_preprocessing/random_contrast.py +15 -0
  21. keras/src/layers/preprocessing/image_preprocessing/random_crop.py +15 -0
  22. keras/src/layers/preprocessing/image_preprocessing/random_elastic_transform.py +14 -0
  23. keras/src/layers/preprocessing/image_preprocessing/random_erasing.py +15 -0
  24. keras/src/layers/preprocessing/image_preprocessing/random_flip.py +15 -0
  25. keras/src/layers/preprocessing/image_preprocessing/random_gaussian_blur.py +15 -0
  26. keras/src/layers/preprocessing/image_preprocessing/random_grayscale.py +15 -0
  27. keras/src/layers/preprocessing/image_preprocessing/random_invert.py +15 -0
  28. keras/src/layers/preprocessing/image_preprocessing/random_perspective.py +14 -0
  29. keras/src/layers/preprocessing/image_preprocessing/random_posterization.py +15 -0
  30. keras/src/layers/preprocessing/image_preprocessing/random_rotation.py +15 -0
  31. keras/src/layers/preprocessing/image_preprocessing/random_sharpness.py +15 -0
  32. keras/src/layers/preprocessing/image_preprocessing/random_shear.py +15 -0
  33. keras/src/layers/preprocessing/image_preprocessing/random_translation.py +15 -0
  34. keras/src/layers/rnn/time_distributed.py +36 -28
  35. keras/src/ops/core.py +1 -1
  36. keras/src/ops/numpy.py +113 -0
  37. keras/src/regularizers/regularizers.py +2 -2
  38. keras/src/version.py +1 -1
  39. {keras_nightly-3.14.0.dev2026012204.dist-info → keras_nightly-3.14.0.dev2026012404.dist-info}/METADATA +1 -1
  40. {keras_nightly-3.14.0.dev2026012204.dist-info → keras_nightly-3.14.0.dev2026012404.dist-info}/RECORD +42 -42
  41. {keras_nightly-3.14.0.dev2026012204.dist-info → keras_nightly-3.14.0.dev2026012404.dist-info}/WHEEL +0 -0
  42. {keras_nightly-3.14.0.dev2026012204.dist-info → keras_nightly-3.14.0.dev2026012404.dist-info}/top_level.txt +0 -0
keras/_tf_keras/keras/ops/__init__.py CHANGED
@@ -245,6 +245,8 @@ from keras.src.ops.numpy import mod as mod
 from keras.src.ops.numpy import moveaxis as moveaxis
 from keras.src.ops.numpy import multiply as multiply
 from keras.src.ops.numpy import nan_to_num as nan_to_num
+from keras.src.ops.numpy import nanmax as nanmax
+from keras.src.ops.numpy import nanmin as nanmin
 from keras.src.ops.numpy import nansum as nansum
 from keras.src.ops.numpy import ndim as ndim
 from keras.src.ops.numpy import negative as negative
keras/_tf_keras/keras/ops/numpy/__init__.py CHANGED
@@ -129,6 +129,8 @@ from keras.src.ops.numpy import mod as mod
 from keras.src.ops.numpy import moveaxis as moveaxis
 from keras.src.ops.numpy import multiply as multiply
 from keras.src.ops.numpy import nan_to_num as nan_to_num
+from keras.src.ops.numpy import nanmax as nanmax
+from keras.src.ops.numpy import nanmin as nanmin
 from keras.src.ops.numpy import nansum as nansum
 from keras.src.ops.numpy import ndim as ndim
 from keras.src.ops.numpy import negative as negative
keras/ops/__init__.py CHANGED
@@ -245,6 +245,8 @@ from keras.src.ops.numpy import mod as mod
 from keras.src.ops.numpy import moveaxis as moveaxis
 from keras.src.ops.numpy import multiply as multiply
 from keras.src.ops.numpy import nan_to_num as nan_to_num
+from keras.src.ops.numpy import nanmax as nanmax
+from keras.src.ops.numpy import nanmin as nanmin
 from keras.src.ops.numpy import nansum as nansum
 from keras.src.ops.numpy import ndim as ndim
 from keras.src.ops.numpy import negative as negative
keras/ops/numpy/__init__.py CHANGED
@@ -129,6 +129,8 @@ from keras.src.ops.numpy import mod as mod
 from keras.src.ops.numpy import moveaxis as moveaxis
 from keras.src.ops.numpy import multiply as multiply
 from keras.src.ops.numpy import nan_to_num as nan_to_num
+from keras.src.ops.numpy import nanmax as nanmax
+from keras.src.ops.numpy import nanmin as nanmin
 from keras.src.ops.numpy import nansum as nansum
 from keras.src.ops.numpy import ndim as ndim
 from keras.src.ops.numpy import negative as negative
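The four export hunks above wire the new `nanmax` and `nanmin` ops into the public `keras.ops` namespace. A minimal usage sketch, assuming this nightly build and NumPy-style NaN semantics (NaNs are ignored unless an entire reduction slice is NaN):

```python
import numpy as np
import keras

x = keras.ops.convert_to_tensor(
    np.array([[1.0, np.nan, 3.0], [np.nan, np.nan, 2.0]], dtype="float32")
)

print(keras.ops.nanmax(x))          # 3.0 -- NaNs are skipped
print(keras.ops.nanmin(x, axis=1))  # [1.0, 2.0]
print(keras.ops.nanmax(x, axis=0, keepdims=True).shape)  # (1, 3)
```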
keras/src/backend/jax/numpy.py CHANGED
@@ -1013,6 +1013,16 @@ def moveaxis(x, source, destination):
     return jnp.moveaxis(x, source=source, destination=destination)
 
 
+def nanmax(x, axis=None, keepdims=False):
+    x = convert_to_tensor(x)
+    return jnp.nanmax(x, axis=axis, keepdims=keepdims)
+
+
+def nanmin(x, axis=None, keepdims=False):
+    x = convert_to_tensor(x)
+    return jnp.nanmin(x, axis=axis, keepdims=keepdims)
+
+
 def nansum(x, axis=None, keepdims=False):
     x = convert_to_tensor(x)
     return jnp.nansum(x, axis=axis, keepdims=keepdims)
keras/src/backend/numpy/numpy.py CHANGED
@@ -960,6 +960,14 @@ def moveaxis(x, source, destination):
     return np.moveaxis(x, source=source, destination=destination)
 
 
+def nanmax(x, axis=None, keepdims=False):
+    return np.nanmax(x, axis=axis, keepdims=keepdims)
+
+
+def nanmin(x, axis=None, keepdims=False):
+    return np.nanmin(x, axis=axis, keepdims=keepdims)
+
+
 def nansum(x, axis=None, keepdims=False):
     axis = standardize_axis_for_numpy(axis)
     dtype = standardize_dtype(x.dtype)
keras/src/backend/openvino/numpy.py CHANGED
@@ -1522,7 +1522,25 @@ def lcm(x1, x2):
 
 
 def ldexp(x1, x2):
-    raise NotImplementedError("`ldexp` is not supported with openvino backend")
+    element_type = None
+    if isinstance(x1, OpenVINOKerasTensor):
+        element_type = x1.output.get_element_type()
+    if isinstance(x2, OpenVINOKerasTensor):
+        element_type = x2.output.get_element_type()
+    x1 = get_ov_output(x1, element_type)
+    x2 = get_ov_output(x2, element_type)
+    x1, x2 = _align_operand_types(x1, x2, "ldexp()")
+
+    float_dtype = OPENVINO_DTYPES[config.floatx()]
+    if x1.get_element_type().is_integral():
+        x1 = ov_opset.convert(x1, float_dtype)
+    if x2.get_element_type().is_integral():
+        x2 = ov_opset.convert(x2, float_dtype)
+
+    const_two = ov_opset.constant(2, x2.get_element_type())
+    result = ov_opset.multiply(x1, ov_opset.power(const_two, x2))
+
+    return OpenVINOKerasTensor(result.output(0))
 
 
 def less(x1, x2):
@@ -2089,6 +2107,14 @@ def moveaxis(x, source, destination):
     return OpenVINOKerasTensor(ov_opset.transpose(x, axes_const).output(0))
 
 
+def nanmax(x, axis=None, keepdims=False):
+    raise NotImplementedError("`nanmax` is not supported with openvino backend")
+
+
+def nanmin(x, axis=None, keepdims=False):
+    raise NotImplementedError("`nanmin` is not supported with openvino backend")
+
+
 def nansum(x, axis=None, keepdims=False):
     raise NotImplementedError("`nansum` is not supported with openvino backend")
 
@@ -3188,7 +3214,20 @@ def correlate(x1, x2, mode="valid"):
 
 
 def select(condlist, choicelist, default=0):
-    raise NotImplementedError("`select` is not supported with openvino backend")
+    if len(condlist) != len(choicelist):
+        raise ValueError(
+            "select(): condlist and choicelist must have the same length"
+        )
+    conds = [get_ov_output(c) for c in condlist]
+    choices = [get_ov_output(v) for v in choicelist]
+
+    result = get_ov_output(default)
+    for cond_idx in reversed(range(len(conds))):
+        cond = conds[cond_idx]
+        choice = choices[cond_idx]
+        choice, result = _align_operand_types(choice, result, "select()")
+        result = ov_opset.select(cond, choice, result).output(0)
+    return OpenVINOKerasTensor(result)
 
 
 def slogdet(x):
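For reference, the OpenVINO `ldexp` graph above computes `x1 * 2**x2`, and `select` folds the condition list from the back so that earlier conditions win, matching `np.select`. A NumPy sketch of the intended semantics (illustrative only, not part of the diff):

```python
import numpy as np

# ldexp(x1, x2) == x1 * 2**x2
assert np.ldexp(3.0, 4) == 3.0 * 2.0**4  # 48.0

# select() returns, element-wise, the choice of the first condition that
# holds, falling back to `default` when none do.
condlist = [np.array([True, False, False]), np.array([True, True, False])]
choicelist = [np.array([1, 1, 1]), np.array([2, 2, 2])]
print(np.select(condlist, choicelist, default=0))  # [1 2 0]
```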
keras/src/backend/tensorflow/numpy.py CHANGED
@@ -2125,6 +2125,46 @@ def moveaxis(x, source, destination):
     return tf.transpose(x, perm)
 
 
+def nanmax(x, axis=None, keepdims=False):
+    x = convert_to_tensor(x)
+
+    if not x.dtype.is_floating:
+        dtype = standardize_dtype(x.dtype)
+        if dtype == "bool":
+            return tf.reduce_any(x, axis=axis, keepdims=keepdims)
+        return tf.reduce_max(x, axis=axis, keepdims=keepdims)
+
+    x_clean = tf.where(
+        tf.math.is_nan(x), tf.constant(float("-inf"), dtype=x.dtype), x
+    )
+
+    return tf.where(
+        tf.reduce_all(tf.math.is_nan(x), axis=axis, keepdims=keepdims),
+        tf.constant(float("nan"), dtype=x.dtype),
+        tf.reduce_max(x_clean, axis=axis, keepdims=keepdims),
+    )
+
+
+def nanmin(x, axis=None, keepdims=False):
+    x = convert_to_tensor(x)
+
+    if not x.dtype.is_floating:
+        dtype = standardize_dtype(x.dtype)
+        if dtype == "bool":
+            return tf.reduce_all(x, axis=axis, keepdims=keepdims)
+        return tf.reduce_min(x, axis=axis, keepdims=keepdims)
+
+    x_clean = tf.where(
+        tf.math.is_nan(x), tf.constant(float("inf"), dtype=x.dtype), x
+    )
+
+    return tf.where(
+        tf.reduce_all(tf.math.is_nan(x), axis=axis, keepdims=keepdims),
+        tf.constant(float("nan"), dtype=x.dtype),
+        tf.reduce_min(x_clean, axis=axis, keepdims=keepdims),
+    )
+
+
 def nansum(x, axis=None, keepdims=False):
     x = convert_to_tensor(x)
     dtype = standardize_dtype(x.dtype)
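The TensorFlow implementation above masks NaNs with ±inf before reducing and only restores NaN for slices that were entirely NaN; boolean inputs fall back to `reduce_any`/`reduce_all`. A small sketch of the resulting behavior for float inputs, assuming the TensorFlow backend is active:

```python
import numpy as np
import tensorflow as tf
from keras import ops

x = tf.constant([[np.nan, np.nan], [1.0, np.nan]])

# Rows that are all-NaN stay NaN; otherwise NaNs are skipped.
print(ops.nanmax(x, axis=1))  # [nan, 1.0]
print(ops.nanmin(x, axis=1))  # [nan, 1.0]
```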
keras/src/backend/torch/core.py CHANGED
@@ -1,6 +1,7 @@
 import builtins
 import contextlib
 import functools
+import os
 
 import ml_dtypes
 import numpy as np
@@ -26,7 +27,9 @@ IS_THREAD_SAFE = True
 # Some operators such as 'aten::_foreach_mul_.Scalar'
 # are not currently implemented for the MPS device.
 # check https://github.com/pytorch/pytorch/issues/77764.
-if torch.backends.mps.is_available():
+if "KERAS_TORCH_DEVICE" in os.environ:
+    DEFAULT_DEVICE = os.environ["KERAS_TORCH_DEVICE"]
+elif torch.backends.mps.is_available():
     DEFAULT_DEVICE = "mps"
 elif torch.cuda.is_available():
     DEFAULT_DEVICE = "cuda"
keras/src/backend/torch/nn.py CHANGED
@@ -1131,10 +1131,6 @@ def dot_product_attention(
     flash_attention=None,
     attn_logits_soft_cap=None,
 ):
-    if bias is not None:
-        raise ValueError(
-            "torch's `dot_product_attention` doesn't support `bias`."
-        )
     query = convert_to_tensor(query)
     key = convert_to_tensor(key)
     value = convert_to_tensor(value)
@@ -1144,6 +1140,10 @@ def dot_product_attention(
             f"Received: query.shape={query.shape}, key.shape={key.shape}, "
             f"value.shape={value.shape}."
         )
+    if bias is not None and mask is not None:
+        raise ValueError(
+            "Only one of `bias` and `mask` can be provided. Received both."
+        )
     compute_dtype = backend.result_type(query.dtype, key.dtype, value.dtype)
     query = cast(query, compute_dtype)
     key = cast(key, compute_dtype)
@@ -1154,6 +1154,9 @@ def dot_product_attention(
         # Explicit set `is_causal` to `False` when `mask` is not `None`.
         is_causal = False
        mask = torch.where(mask, 0.0, _get_large_negative(query.dtype))
+    if bias is not None:
+        bias = convert_to_tensor(bias, dtype=compute_dtype)
+        mask = bias  # Use `bias` as `mask` for scaled_dot_product_attention.
 
     axis0, axis1 = 1, 2
     query = torch.transpose(query, axis0, axis1)
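With the `torch/nn.py` changes, `dot_product_attention` now accepts an additive `bias` (previously rejected outright) and forwards it as the attention mask, while passing both `bias` and `mask` raises a `ValueError`. A hedged sketch, assuming the torch backend, the `(batch, seq, heads, head_dim)` input layout, and a bias broadcastable to `(batch, heads, q_len, kv_len)`:

```python
import numpy as np
from keras import ops

batch, seq, heads, dim = 2, 4, 2, 8
query = np.random.rand(batch, seq, heads, dim).astype("float32")
key = np.random.rand(batch, seq, heads, dim).astype("float32")
value = np.random.rand(batch, seq, heads, dim).astype("float32")

# Additive attention-logits bias; large negative values suppress attention.
bias = np.zeros((batch, heads, seq, seq), dtype="float32")
bias[..., -1] = -1e9  # discourage attending to the last key position

out = ops.dot_product_attention(query, key, value, bias=bias)
print(out.shape)  # (2, 4, 2, 8)
```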
keras/src/backend/torch/numpy.py CHANGED
@@ -1272,6 +1272,42 @@ def moveaxis(x, source, destination):
     return torch.moveaxis(x, source=source, destination=destination)
 
 
+def nanmax(x, axis=None, keepdims=False):
+    x = convert_to_tensor(x)
+    if not torch.is_floating_point(x):
+        return torch.amax(x, dim=axis, keepdim=keepdims)
+
+    if axis == () or axis == []:
+        return x
+
+    x_clean = torch.where(torch.isnan(x), float("-inf"), x)
+    out = torch.amax(x_clean, dim=axis, keepdim=keepdims)
+
+    return torch.where(
+        torch.isnan(x).all(dim=axis, keepdim=keepdims),
+        torch.tensor(float("nan"), dtype=x.dtype, device=get_device()),
+        out,
+    )
+
+
+def nanmin(x, axis=None, keepdims=False):
+    x = convert_to_tensor(x)
+    if not torch.is_floating_point(x):
+        return torch.amin(x, dim=axis, keepdim=keepdims)
+
+    if axis == () or axis == []:
+        return x
+
+    x_clean = torch.where(torch.isnan(x), float("inf"), x)
+    out = torch.amin(x_clean, dim=axis, keepdim=keepdims)
+
+    return torch.where(
+        torch.isnan(x).all(dim=axis, keepdim=keepdims),
+        torch.tensor(float("nan"), dtype=x.dtype, device=get_device()),
+        out,
+    )
+
+
 def nansum(x, axis=None, keepdims=False):
     if isinstance(x, (list, tuple)):
         x = stack(x)
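The torch `nanmax`/`nanmin` above use the same masking strategy as the TensorFlow version, since `torch.amax`/`torch.amin` propagate NaN. A standalone torch sketch of the equivalent logic (illustrative, mirroring the diff rather than calling it):

```python
import torch

x = torch.tensor([[1.0, float("nan"), 3.0], [float("nan")] * 3])

# Plain reductions propagate NaN...
print(torch.amax(x, dim=1))  # tensor([nan, nan])

# ...the masked variant substitutes -inf, reduces, and restores NaN only
# for rows that were entirely NaN.
clean = torch.where(torch.isnan(x), float("-inf"), x)
out = torch.amax(clean, dim=1)
out = torch.where(torch.isnan(x).all(dim=1), torch.tensor(float("nan")), out)
print(out)  # tensor([3., nan])
```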
keras/src/layers/layer.py CHANGED
@@ -27,6 +27,7 @@ from keras.src import backend
 from keras.src import constraints
 from keras.src import dtype_policies
 from keras.src import initializers
+from keras.src import ops
 from keras.src import regularizers
 from keras.src import tree
 from keras.src import utils
@@ -974,7 +975,15 @@ class Layer(BackendLayer, Operation):
         if self.activity_regularizer is not None:
             for output in tree.flatten(outputs):
                 if backend.is_tensor(output):
-                    self.add_loss(self.activity_regularizer(output))
+                    loss = self.activity_regularizer(output)
+                    if output.ndim > 0:
+                        # Normalize by batch size to ensure consistent
+                        # regularization strength across batch sizes
+                        batch_size = ops.cast(
+                            ops.shape(output)[0], dtype=loss.dtype
+                        )
+                        loss = ops.divide_no_nan(loss, batch_size)
+                    self.add_loss(loss)
 
         # Set `previous_mask` on outputs if available. It is provided only
         # for the first positional input arg and its mask.
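The `layer.py` change divides the activity-regularization loss by the batch size, so the recorded penalty is a per-sample average instead of scaling with the batch. A minimal sketch of the observable effect:

```python
import numpy as np
import keras

layer = keras.layers.Dense(
    4,
    kernel_initializer="ones",
    activity_regularizer=keras.regularizers.L2(0.01),
)

# After a forward pass, `layer.losses` holds the activity penalty,
# now normalized by the batch size (8 here).
_ = layer(np.ones((8, 16), dtype="float32"))
print(layer.losses)
```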
keras/src/layers/preprocessing/image_preprocessing/aug_mix.py CHANGED
@@ -5,6 +5,9 @@ from keras.src.api_export import keras_export
 from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import (  # noqa: E501
     BaseImagePreprocessingLayer,
 )
+from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import (  # noqa: E501
+    base_image_preprocessing_transform_example,
+)
 from keras.src.random import SeedGenerator
 from keras.src.utils import backend_utils
 
@@ -71,6 +74,10 @@ class AugMix(BaseImagePreprocessingLayer):
         interpolation: The interpolation method to use for resizing operations.
             Options include `"nearest"`, `"bilinear"`. Default is `"bilinear"`.
         seed: Integer. Used to create a random seed.
+
+    Example:
+
+    {{base_image_preprocessing_transform_example}}
     """
 
     _USE_BASE_FACTOR = False
@@ -326,3 +333,9 @@ class AugMix(BaseImagePreprocessingLayer):
         }
         base_config = super().get_config()
         return {**base_config, **config}
+
+
+AugMix.__doc__ = AugMix.__doc__.replace(
+    "{{base_image_preprocessing_transform_example}}",
+    base_image_preprocessing_transform_example.replace("{LayerName}", "AugMix"),
+)
keras/src/layers/preprocessing/image_preprocessing/base_image_preprocessing_layer.py CHANGED
@@ -383,3 +383,61 @@ class BaseImagePreprocessingLayer(DataLayer):
         )
 
         return affine_matrix
+
+
+base_image_preprocessing_transform_example = """
+```python
+layer = keras.layers.{LayerName}(bounding_box_format="xyxy")
+images = np.random.randint(0, 255, (4, 224, 224, 3), dtype="uint8")
+
+bounding_boxes = {
+    "boxes": np.array([
+        [[10, 20, 100, 150], [50, 60, 200, 250]],
+        [[15, 25, 110, 160], [55, 65, 210, 260]],
+        [[20, 30, 120, 170], [60, 70, 220, 270]],
+        [[25, 35, 130, 180], [65, 75, 230, 280]],
+    ], dtype="float32"),
+    "labels": np.array([[0, 1], [1, 2], [2, 3], [0, 3]], dtype="int32")
+}
+
+labels = keras.ops.one_hot(
+    np.array([0, 1, 2, 3]),
+    num_classes=4
+)
+
+segmentation_masks = np.random.randint(0, 3, (4, 224, 224, 1), dtype="uint8")
+
+output = layer(
+    {
+        "images": images,
+        "bounding_boxes": bounding_boxes,
+        "labels": labels,
+        "segmentation_masks": segmentation_masks
+    },
+    training=True
+)
+```
+"""
+
+base_image_preprocessing_color_example = """
+```python
+layer = keras.layers.{LayerName}(value_range=(0, 255))
+images = np.random.randint(0, 255, (8, 224, 224, 3), dtype="uint8")
+
+labels = keras.ops.one_hot(
+    np.array([0, 1, 2, 0, 1, 2, 0, 1]),
+    num_classes=3
+)
+
+segmentation_masks = np.random.randint(0, 3, (8, 224, 224, 1), dtype="uint8")
+
+output = layer(
+    {
+        "images": images,
+        "labels": labels,
+        "segmentation_masks": segmentation_masks
+    },
+    training=True
+)
+```
+"""
keras/src/layers/preprocessing/image_preprocessing/cut_mix.py CHANGED
@@ -2,6 +2,9 @@ from keras.src.api_export import keras_export
 from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import (  # noqa: E501
     BaseImagePreprocessingLayer,
 )
+from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import (  # noqa: E501
+    base_image_preprocessing_color_example,
+)
 from keras.src.random import SeedGenerator
 
 
@@ -29,6 +32,10 @@ class CutMix(BaseImagePreprocessingLayer):
            in patch sizes, leading to more diverse and larger mixed patches.
            Defaults to 1.
        seed: Integer. Used to create a random seed.
+
+    Example:
+
+    {{base_image_preprocessing_color_example}}
     """
 
     _USE_BASE_FACTOR = False
@@ -227,3 +234,9 @@ class CutMix(BaseImagePreprocessingLayer):
         }
         base_config = super().get_config()
         return {**base_config, **config}
+
+
+CutMix.__doc__ = CutMix.__doc__.replace(
+    "{{base_image_preprocessing_color_example}}",
+    base_image_preprocessing_color_example.replace("{LayerName}", "CutMix"),
+)
keras/src/layers/preprocessing/image_preprocessing/max_num_bounding_box.py CHANGED
@@ -15,6 +15,29 @@ class MaxNumBoundingBoxes(BaseImagePreprocessingLayer):
         max_number: Desired output number of bounding boxes.
         padding_value: The padding value of the `boxes` and `labels` in
             `bounding_boxes`. Defaults to `-1`.
+
+    Example:
+
+    ```python
+    max_boxes_layer = keras.layers.MaxNumBoundingBoxes(
+        max_number=10,
+        fill_value=-1
+    )
+
+    images = np.random.randint(0, 255, (1, 224, 224, 3), dtype="uint8")
+
+    bounding_boxes = {
+        "boxes": np.array([
+            [[10, 20, 100, 150], [50, 60, 200, 250], [0, 0, 50, 50]],
+        ]),
+        "labels": np.array([[1, 2, 3]])
+    }
+
+    result = max_boxes_layer({
+        "images": images,
+        "bounding_boxes": bounding_boxes
+    })
+    ```
     """
 
     def __init__(self, max_number, fill_value=-1, **kwargs):
keras/src/layers/preprocessing/image_preprocessing/rand_augment.py CHANGED
@@ -3,6 +3,9 @@ from keras.src.api_export import keras_export
 from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import (  # noqa: E501
     BaseImagePreprocessingLayer,
 )
+from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import (  # noqa: E501
+    base_image_preprocessing_transform_example,
+)
 from keras.src.random import SeedGenerator
 from keras.src.utils import backend_utils
 
@@ -32,6 +35,10 @@ class RandAugment(BaseImagePreprocessingLayer):
         interpolation: The interpolation method to use for resizing operations.
            Options include `nearest`, `bilinear`. Default is `bilinear`.
        seed: Integer. Used to create a random seed.
+
+    Example:
+
+    {{base_image_preprocessing_transform_example}}
     """
 
     _USE_BASE_FACTOR = False
@@ -265,3 +272,11 @@ class RandAugment(BaseImagePreprocessingLayer):
         }
         base_config = super().get_config()
         return {**base_config, **config}
+
+
+RandAugment.__doc__ = RandAugment.__doc__.replace(
+    "{{base_image_preprocessing_transform_example}}",
+    base_image_preprocessing_transform_example.replace(
+        "{LayerName}", "RandAugment"
+    ),
+)
keras/src/layers/preprocessing/image_preprocessing/random_color_degeneration.py CHANGED
@@ -2,6 +2,9 @@ from keras.src.api_export import keras_export
 from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import (  # noqa: E501
     BaseImagePreprocessingLayer,
 )
+from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import (  # noqa: E501
+    base_image_preprocessing_color_example,
+)
 from keras.src.random import SeedGenerator
 
 
@@ -29,6 +32,10 @@ class RandomColorDegeneration(BaseImagePreprocessingLayer):
            passed float is sampled. In order to ensure the value is always the
            same, please pass a tuple with two identical floats: `(0.5, 0.5)`.
        seed: Integer. Used to create a random seed.
+
+    Example:
+
+    {{base_image_preprocessing_color_example}}
     """
 
     _VALUE_RANGE_VALIDATION_ERROR = (
@@ -133,3 +140,11 @@ class RandomColorDegeneration(BaseImagePreprocessingLayer):
 
     def compute_output_shape(self, input_shape):
         return input_shape
+
+
+RandomColorDegeneration.__doc__ = RandomColorDegeneration.__doc__.replace(
+    "{{base_image_preprocessing_color_example}}",
+    base_image_preprocessing_color_example.replace(
+        "{LayerName}", "RandomColorDegeneration"
+    ),
+)
keras/src/layers/preprocessing/image_preprocessing/random_color_jitter.py CHANGED
@@ -6,6 +6,9 @@ from keras.src.api_export import keras_export
 from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import (  # noqa: E501
     BaseImagePreprocessingLayer,
 )
+from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import (  # noqa: E501
+    base_image_preprocessing_color_example,
+)
 from keras.src.random.seed_generator import SeedGenerator
 from keras.src.utils import backend_utils
 
@@ -60,6 +63,10 @@ class RandomColorJitter(BaseImagePreprocessingLayer):
            always the same, please pass a tuple with two identical
            floats: `(0.5, 0.5)`.
        seed: Integer. Used to create a random seed.
+
+    Example:
+
+    {{base_image_preprocessing_color_example}}
     """
 
     def __init__(
@@ -211,3 +218,11 @@ class RandomColorJitter(BaseImagePreprocessingLayer):
         }
         base_config = super().get_config()
         return {**base_config, **config}
+
+
+RandomColorJitter.__doc__ = RandomColorJitter.__doc__.replace(
+    "{{base_image_preprocessing_color_example}}",
+    base_image_preprocessing_color_example.replace(
+        "{LayerName}", "RandomColorJitter"
+    ),
+)
keras/src/layers/preprocessing/image_preprocessing/random_contrast.py CHANGED
@@ -2,6 +2,9 @@ from keras.src.api_export import keras_export
 from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import (  # noqa: E501
     BaseImagePreprocessingLayer,
 )
+from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import (  # noqa: E501
+    base_image_preprocessing_color_example,
+)
 from keras.src.random.seed_generator import SeedGenerator
 
 
@@ -45,6 +48,10 @@ class RandomContrast(BaseImagePreprocessingLayer):
            typically either `[0, 1]` or `[0, 255]` depending on how your
            preprocessing pipeline is set up.
        seed: Integer. Used to create a random seed.
+
+    Example:
+
+    {{base_image_preprocessing_color_example}}
     """
 
     _FACTOR_BOUNDS = (0, 1)
@@ -147,3 +154,11 @@ class RandomContrast(BaseImagePreprocessingLayer):
         }
         base_config = super().get_config()
         return {**base_config, **config}
+
+
+RandomContrast.__doc__ = RandomContrast.__doc__.replace(
+    "{{base_image_preprocessing_color_example}}",
+    base_image_preprocessing_color_example.replace(
+        "{LayerName}", "RandomContrast"
+    ),
+)
keras/src/layers/preprocessing/image_preprocessing/random_crop.py CHANGED
@@ -3,6 +3,9 @@ from keras.src.api_export import keras_export
 from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import (  # noqa: E501
     BaseImagePreprocessingLayer,
 )
+from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import (  # noqa: E501
+    base_image_preprocessing_transform_example,
+)
 from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (  # noqa: E501
     convert_format,
 )
@@ -47,6 +50,10 @@ class RandomCrop(BaseImagePreprocessingLayer):
        seed: Integer. Used to create a random seed.
        **kwargs: Base layer keyword arguments, such as
            `name` and `dtype`.
+
+    Example:
+
+    {{base_image_preprocessing_transform_example}}
     """
 
     def __init__(
@@ -274,3 +281,11 @@ class RandomCrop(BaseImagePreprocessingLayer):
            }
        )
        return config
+
+
+RandomCrop.__doc__ = RandomCrop.__doc__.replace(
+    "{{base_image_preprocessing_transform_example}}",
+    base_image_preprocessing_transform_example.replace(
+        "{LayerName}", "RandomCrop"
+    ),
+)
keras/src/layers/preprocessing/image_preprocessing/random_elastic_transform.py CHANGED
@@ -2,6 +2,9 @@ from keras.src.api_export import keras_export
 from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import (  # noqa: E501
     BaseImagePreprocessingLayer,
 )
+from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import (  # noqa: E501
+    base_image_preprocessing_transform_example,
+)
 from keras.src.random.seed_generator import SeedGenerator
 
 
@@ -62,6 +65,9 @@ class RandomElasticTransform(BaseImagePreprocessingLayer):
            preprocessing pipeline is set up.
        seed: Integer. Used to create a random seed.
 
+    Example:
+
+    {{base_image_preprocessing_transform_example}}
     """
 
     _USE_BASE_FACTOR = False
@@ -277,3 +283,11 @@ class RandomElasticTransform(BaseImagePreprocessingLayer):
            "seed": self.seed,
        }
        return {**base_config, **config}
+
+
+RandomElasticTransform.__doc__ = RandomElasticTransform.__doc__.replace(
+    "{{base_image_preprocessing_transform_example}}",
+    base_image_preprocessing_transform_example.replace(
+        "{LayerName}", "RandomElasticTransform"
+    ),
+)