keras-nightly 3.12.0.dev2025082103-py3-none-any.whl → 3.12.0.dev2025082303-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the registry.
Files changed (126)
  1. keras/_tf_keras/keras/ops/__init__.py +1 -0
  2. keras/_tf_keras/keras/ops/numpy/__init__.py +1 -0
  3. keras/_tf_keras/keras/quantizers/__init__.py +1 -0
  4. keras/ops/__init__.py +1 -0
  5. keras/ops/numpy/__init__.py +1 -0
  6. keras/quantizers/__init__.py +1 -0
  7. keras/src/applications/convnext.py +20 -20
  8. keras/src/applications/densenet.py +21 -21
  9. keras/src/applications/efficientnet.py +16 -16
  10. keras/src/applications/efficientnet_v2.py +28 -28
  11. keras/src/applications/inception_resnet_v2.py +7 -7
  12. keras/src/applications/inception_v3.py +5 -5
  13. keras/src/applications/mobilenet_v2.py +13 -20
  14. keras/src/applications/mobilenet_v3.py +15 -15
  15. keras/src/applications/nasnet.py +7 -8
  16. keras/src/applications/resnet.py +32 -32
  17. keras/src/applications/xception.py +10 -10
  18. keras/src/backend/common/dtypes.py +8 -3
  19. keras/src/backend/common/variables.py +3 -1
  20. keras/src/backend/jax/export.py +1 -1
  21. keras/src/backend/jax/numpy.py +6 -0
  22. keras/src/backend/jax/trainer.py +1 -1
  23. keras/src/backend/numpy/numpy.py +28 -0
  24. keras/src/backend/openvino/numpy.py +5 -1
  25. keras/src/backend/tensorflow/numpy.py +22 -0
  26. keras/src/backend/tensorflow/trainer.py +19 -1
  27. keras/src/backend/torch/core.py +6 -9
  28. keras/src/backend/torch/nn.py +1 -2
  29. keras/src/backend/torch/numpy.py +16 -0
  30. keras/src/backend/torch/trainer.py +1 -1
  31. keras/src/callbacks/backup_and_restore.py +2 -2
  32. keras/src/callbacks/csv_logger.py +1 -1
  33. keras/src/callbacks/model_checkpoint.py +1 -1
  34. keras/src/callbacks/tensorboard.py +6 -6
  35. keras/src/constraints/constraints.py +9 -7
  36. keras/src/datasets/boston_housing.py +1 -1
  37. keras/src/datasets/california_housing.py +1 -1
  38. keras/src/datasets/cifar10.py +1 -1
  39. keras/src/datasets/cifar100.py +2 -2
  40. keras/src/datasets/imdb.py +2 -2
  41. keras/src/datasets/mnist.py +1 -1
  42. keras/src/datasets/reuters.py +2 -2
  43. keras/src/dtype_policies/dtype_policy.py +1 -1
  44. keras/src/dtype_policies/dtype_policy_map.py +1 -1
  45. keras/src/export/tf2onnx_lib.py +1 -3
  46. keras/src/initializers/constant_initializers.py +9 -5
  47. keras/src/layers/input_spec.py +6 -6
  48. keras/src/layers/layer.py +1 -1
  49. keras/src/layers/preprocessing/category_encoding.py +3 -3
  50. keras/src/layers/preprocessing/data_layer.py +159 -0
  51. keras/src/layers/preprocessing/discretization.py +3 -3
  52. keras/src/layers/preprocessing/feature_space.py +4 -4
  53. keras/src/layers/preprocessing/image_preprocessing/aug_mix.py +7 -4
  54. keras/src/layers/preprocessing/image_preprocessing/auto_contrast.py +3 -0
  55. keras/src/layers/preprocessing/image_preprocessing/base_image_preprocessing_layer.py +2 -2
  56. keras/src/layers/preprocessing/image_preprocessing/center_crop.py +1 -1
  57. keras/src/layers/preprocessing/image_preprocessing/cut_mix.py +6 -3
  58. keras/src/layers/preprocessing/image_preprocessing/equalization.py +1 -1
  59. keras/src/layers/preprocessing/image_preprocessing/max_num_bounding_box.py +3 -0
  60. keras/src/layers/preprocessing/image_preprocessing/mix_up.py +7 -4
  61. keras/src/layers/preprocessing/image_preprocessing/rand_augment.py +3 -1
  62. keras/src/layers/preprocessing/image_preprocessing/random_brightness.py +1 -1
  63. keras/src/layers/preprocessing/image_preprocessing/random_color_degeneration.py +3 -0
  64. keras/src/layers/preprocessing/image_preprocessing/random_color_jitter.py +3 -0
  65. keras/src/layers/preprocessing/image_preprocessing/random_contrast.py +1 -1
  66. keras/src/layers/preprocessing/image_preprocessing/random_crop.py +1 -1
  67. keras/src/layers/preprocessing/image_preprocessing/random_elastic_transform.py +3 -0
  68. keras/src/layers/preprocessing/image_preprocessing/random_erasing.py +6 -3
  69. keras/src/layers/preprocessing/image_preprocessing/random_flip.py +1 -1
  70. keras/src/layers/preprocessing/image_preprocessing/random_gaussian_blur.py +3 -0
  71. keras/src/layers/preprocessing/image_preprocessing/random_grayscale.py +1 -1
  72. keras/src/layers/preprocessing/image_preprocessing/random_hue.py +3 -0
  73. keras/src/layers/preprocessing/image_preprocessing/random_invert.py +3 -0
  74. keras/src/layers/preprocessing/image_preprocessing/random_perspective.py +3 -0
  75. keras/src/layers/preprocessing/image_preprocessing/random_posterization.py +3 -0
  76. keras/src/layers/preprocessing/image_preprocessing/random_rotation.py +1 -1
  77. keras/src/layers/preprocessing/image_preprocessing/random_saturation.py +3 -0
  78. keras/src/layers/preprocessing/image_preprocessing/random_sharpness.py +3 -0
  79. keras/src/layers/preprocessing/image_preprocessing/random_shear.py +3 -0
  80. keras/src/layers/preprocessing/image_preprocessing/random_translation.py +3 -3
  81. keras/src/layers/preprocessing/image_preprocessing/random_zoom.py +3 -3
  82. keras/src/layers/preprocessing/image_preprocessing/resizing.py +3 -3
  83. keras/src/layers/preprocessing/image_preprocessing/solarization.py +3 -0
  84. keras/src/layers/preprocessing/mel_spectrogram.py +29 -25
  85. keras/src/layers/preprocessing/normalization.py +5 -2
  86. keras/src/layers/preprocessing/rescaling.py +3 -3
  87. keras/src/layers/rnn/bidirectional.py +4 -4
  88. keras/src/legacy/backend.py +9 -23
  89. keras/src/legacy/preprocessing/image.py +11 -22
  90. keras/src/legacy/preprocessing/text.py +1 -1
  91. keras/src/models/functional.py +2 -2
  92. keras/src/models/model.py +21 -3
  93. keras/src/ops/function.py +1 -1
  94. keras/src/ops/numpy.py +49 -5
  95. keras/src/ops/operation.py +3 -2
  96. keras/src/optimizers/base_optimizer.py +3 -4
  97. keras/src/optimizers/schedules/learning_rate_schedule.py +16 -9
  98. keras/src/quantizers/gptq.py +350 -0
  99. keras/src/quantizers/gptq_config.py +169 -0
  100. keras/src/quantizers/gptq_core.py +335 -0
  101. keras/src/quantizers/gptq_quant.py +133 -0
  102. keras/src/saving/file_editor.py +22 -20
  103. keras/src/saving/object_registration.py +1 -1
  104. keras/src/saving/saving_lib.py +4 -4
  105. keras/src/saving/serialization_lib.py +3 -5
  106. keras/src/trainers/compile_utils.py +1 -1
  107. keras/src/trainers/data_adapters/array_data_adapter.py +9 -3
  108. keras/src/trainers/data_adapters/data_adapter_utils.py +15 -5
  109. keras/src/trainers/data_adapters/generator_data_adapter.py +2 -0
  110. keras/src/trainers/data_adapters/grain_dataset_adapter.py +8 -2
  111. keras/src/trainers/data_adapters/tf_dataset_adapter.py +4 -2
  112. keras/src/trainers/data_adapters/torch_data_loader_adapter.py +3 -1
  113. keras/src/tree/dmtree_impl.py +19 -3
  114. keras/src/tree/optree_impl.py +3 -3
  115. keras/src/tree/tree_api.py +5 -2
  116. keras/src/utils/file_utils.py +13 -5
  117. keras/src/utils/io_utils.py +1 -1
  118. keras/src/utils/model_visualization.py +1 -1
  119. keras/src/utils/progbar.py +5 -5
  120. keras/src/utils/summary_utils.py +4 -4
  121. keras/src/version.py +1 -1
  122. {keras_nightly-3.12.0.dev2025082103.dist-info → keras_nightly-3.12.0.dev2025082303.dist-info}/METADATA +1 -1
  123. {keras_nightly-3.12.0.dev2025082103.dist-info → keras_nightly-3.12.0.dev2025082303.dist-info}/RECORD +125 -121
  124. keras/src/layers/preprocessing/tf_data_layer.py +0 -78
  125. {keras_nightly-3.12.0.dev2025082103.dist-info → keras_nightly-3.12.0.dev2025082303.dist-info}/WHEEL +0 -0
  126. {keras_nightly-3.12.0.dev2025082103.dist-info → keras_nightly-3.12.0.dev2025082303.dist-info}/top_level.txt +0 -0
keras/src/layers/preprocessing/feature_space.py
@@ -3,7 +3,7 @@ from keras.src import layers
 from keras.src import tree
 from keras.src.api_export import keras_export
 from keras.src.layers.layer import Layer
-from keras.src.layers.preprocessing.tf_data_layer import TFDataLayer
+from keras.src.layers.preprocessing.data_layer import DataLayer
 from keras.src.saving import saving_lib
 from keras.src.saving import serialization_lib
 from keras.src.saving.keras_saveable import KerasSaveable
@@ -723,7 +723,7 @@ class FeatureSpace(Layer):
                 data[name] = tf.expand_dims(x, -1)
 
         with backend_utils.TFGraphScope():
-            # This scope is to make sure that inner TFDataLayers
+            # This scope is to make sure that inner DataLayers
            # will not convert outputs back to backend-native --
            # they should be TF tensors throughout
            preprocessed_data = self._preprocess_features(data)
@@ -808,7 +808,7 @@ class FeatureSpace(Layer):
         return
 
 
-class TFDConcat(TFDataLayer):
+class TFDConcat(DataLayer):
     def __init__(self, axis, **kwargs):
         super().__init__(**kwargs)
         self.axis = axis
@@ -817,6 +817,6 @@ class TFDConcat(TFDataLayer):
         return self.backend.numpy.concatenate(xs, axis=self.axis)
 
 
-class TFDIdentity(TFDataLayer):
+class TFDIdentity(DataLayer):
     def call(self, x):
         return x
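The `TFDataLayer` → `DataLayer` rename tracked by these import changes corresponds to the new `keras/src/layers/preprocessing/data_layer.py` (+159 lines) and the removal of `tf_data_layer.py` (file 124 above). The `TFDConcat` hunk already shows the subclassing pattern; below is a minimal sketch of a custom subclass in the same style. It assumes `DataLayer` keeps the `self.backend` indirection that `TFDConcat` relies on — the exact base-class contract is defined in the new `data_layer.py`.

```python
from keras.src.layers.preprocessing.data_layer import DataLayer


class Scale(DataLayer):
    """Hypothetical example layer: multiplies inputs by a constant factor.

    Routing ops through `self.backend.numpy` (as `TFDConcat` does above) is
    what lets the layer run both eagerly with the active Keras backend and
    inside a `tf.data` or `grain` pipeline.
    """

    def __init__(self, factor, **kwargs):
        super().__init__(**kwargs)
        self.factor = factor

    def call(self, x):
        # `self.backend` resolves to the appropriate ops namespace at call time.
        return self.backend.numpy.multiply(x, self.factor)
```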
keras/src/layers/preprocessing/image_preprocessing/aug_mix.py
@@ -43,6 +43,13 @@ class AugMix(BaseImagePreprocessingLayer):
     in num_chains different ways, with each chain consisting of
     chain_depth augmentations.
 
+    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
+    (independently of which backend you're using).
+
+    References:
+        - [AugMix paper](https://arxiv.org/pdf/1912.02781)
+        - [Official Code](https://github.com/google-research/augmix)
+
     Args:
         value_range: the range of values the incoming images will have.
             Represented as a two number tuple written (low, high).
@@ -64,10 +71,6 @@ class AugMix(BaseImagePreprocessingLayer):
         interpolation: The interpolation method to use for resizing operations.
             Options include `"nearest"`, `"bilinear"`. Default is `"bilinear"`.
         seed: Integer. Used to create a random seed.
-
-    References:
-        - [AugMix paper](https://arxiv.org/pdf/1912.02781)
-        - [Official Code](https://github.com/google-research/augmix)
     """
 
     _USE_BASE_FACTOR = False
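The recurring docstring change in the hunks that follow is the same one-liner: the "safe inside a `tf.data` pipeline" note now also mentions `grain`. As a reminder of what that note means in practice, here is a minimal sketch of the `tf.data` side (it assumes the layer is exported as `keras.layers.AugMix` and that TensorFlow is installed for the pipeline itself; the Keras backend can be JAX, PyTorch, or TensorFlow):

```python
import numpy as np
import tensorflow as tf  # only needed for the tf.data pipeline itself
import keras

# `value_range` is documented in the hunk above.
augment = keras.layers.AugMix(value_range=(0, 255))

images = np.random.randint(0, 256, size=(8, 64, 64, 3)).astype("float32")
dataset = (
    tf.data.Dataset.from_tensor_slices(images)
    .batch(4)
    .map(lambda x: augment(x), num_parallel_calls=tf.data.AUTOTUNE)
)
for batch in dataset.take(1):
    print(batch.shape)  # (4, 64, 64, 3)
```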
keras/src/layers/preprocessing/image_preprocessing/auto_contrast.py
@@ -17,6 +17,9 @@ class AutoContrast(BaseImagePreprocessingLayer):
 
     This layer is active at both training and inference time.
 
+    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
+    (independently of which backend you're using).
+
     Args:
         value_range: Range of values the incoming images will have.
             Represented as a two number tuple written `(low, high)`.
keras/src/layers/preprocessing/image_preprocessing/base_image_preprocessing_layer.py
@@ -1,13 +1,13 @@
 import math
 
 from keras.src.backend import config as backend_config
+from keras.src.layers.preprocessing.data_layer import DataLayer
 from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.validation import (  # noqa: E501
     densify_bounding_boxes,
 )
-from keras.src.layers.preprocessing.tf_data_layer import TFDataLayer
 
 
-class BaseImagePreprocessingLayer(TFDataLayer):
+class BaseImagePreprocessingLayer(DataLayer):
     _USE_BASE_FACTOR = True
     _FACTOR_BOUNDS = (-1, 1)
 
keras/src/layers/preprocessing/image_preprocessing/center_crop.py
@@ -36,7 +36,7 @@ class CenterCrop(BaseImagePreprocessingLayer):
     If the input height/width is even and the target height/width is odd (or
     inversely), the input image is left-padded by 1 pixel.
 
-    **Note:** This layer is safe to use inside a `tf.data` pipeline
+    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
     (independently of which backend you're using).
 
     Args:
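The other half of the updated note is `grain`. A rough sketch of the equivalent Grain pipeline is below; it assumes the `grain` package is installed and uses the `grain.MapDataset` API of recent releases (the exact entry points may differ between Grain versions):

```python
import numpy as np
import grain  # assumption: a recent Grain release exposing `grain.MapDataset`
import keras

crop = keras.layers.CenterCrop(height=48, width=48)

images = [
    np.random.uniform(0, 255, size=(64, 64, 3)).astype("float32")
    for _ in range(8)
]

# Mirrors the tf.data example above: the layer is applied element-wise
# inside the data pipeline, regardless of the active Keras backend.
dataset = grain.MapDataset.source(images).map(lambda x: crop(x))
first = dataset[0]  # shape: (48, 48, 3)
```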
keras/src/layers/preprocessing/image_preprocessing/cut_mix.py
@@ -13,6 +13,12 @@ class CutMix(BaseImagePreprocessingLayer):
     between two images in the dataset, while the labels are also mixed
     proportionally to the area of the patches.
 
+    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
+    (independently of which backend you're using).
+
+    References:
+        - [CutMix paper]( https://arxiv.org/abs/1905.04899).
+
     Args:
         factor: A single float or a tuple of two floats between 0 and 1.
             If a tuple of numbers is passed, a `factor` is sampled
@@ -23,9 +29,6 @@ class CutMix(BaseImagePreprocessingLayer):
             in patch sizes, leading to more diverse and larger mixed patches.
             Defaults to 1.
         seed: Integer. Used to create a random seed.
-
-    References:
-        - [CutMix paper]( https://arxiv.org/abs/1905.04899).
     """
 
     _USE_BASE_FACTOR = False
keras/src/layers/preprocessing/image_preprocessing/equalization.py
@@ -18,7 +18,7 @@ class Equalization(BaseImagePreprocessingLayer):
     equalization independently on each color channel. At inference time,
     the equalization is consistently applied.
 
-    **Note:** This layer is safe to use inside a `tf.data` pipeline
+    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
     (independently of which backend you're using).
 
     Args:
keras/src/layers/preprocessing/image_preprocessing/max_num_bounding_box.py
@@ -8,6 +8,9 @@ from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing
 class MaxNumBoundingBoxes(BaseImagePreprocessingLayer):
     """Ensure the maximum number of bounding boxes.
 
+    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
+    (independently of which backend you're using).
+
     Args:
         max_number: Desired output number of bounding boxes.
         padding_value: The padding value of the `boxes` and `labels` in
keras/src/layers/preprocessing/image_preprocessing/mix_up.py
@@ -11,6 +11,13 @@ from keras.src.utils import backend_utils
 class MixUp(BaseImagePreprocessingLayer):
     """MixUp implements the MixUp data augmentation technique.
 
+    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
+    (independently of which backend you're using).
+
+    References:
+        - [MixUp paper](https://arxiv.org/abs/1710.09412).
+        - [MixUp for Object Detection paper](https://arxiv.org/pdf/1902.04103).
+
     Args:
         alpha: Float between 0 and 1. Controls the blending strength.
             Smaller values mean less mixing, while larger values allow
@@ -18,10 +25,6 @@ class MixUp(BaseImagePreprocessingLayer):
             recommended for ImageNet1k classification.
         seed: Integer. Used to create a random seed.
 
-    References:
-        - [MixUp paper](https://arxiv.org/abs/1710.09412).
-        - [MixUp for Object Detection paper](https://arxiv.org/pdf/1902.04103).
-
     Example:
     ```python
     (images, labels), _ = keras.datasets.cifar10.load_data()
keras/src/layers/preprocessing/image_preprocessing/rand_augment.py
@@ -15,6 +15,9 @@ class RandAugment(BaseImagePreprocessingLayer):
     policy implemented by this layer has been benchmarked extensively and is
     effective on a wide variety of datasets.
 
+    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
+    (independently of which backend you're using).
+
     References:
         - [RandAugment](https://arxiv.org/abs/1909.13719)
 
@@ -29,7 +32,6 @@ class RandAugment(BaseImagePreprocessingLayer):
         interpolation: The interpolation method to use for resizing operations.
             Options include `nearest`, `bilinear`. Default is `bilinear`.
         seed: Integer. Used to create a random seed.
-
     """
 
     _USE_BASE_FACTOR = False
keras/src/layers/preprocessing/image_preprocessing/random_brightness.py
@@ -13,7 +13,7 @@ class RandomBrightness(BaseImagePreprocessingLayer):
     images. At inference time, the output will be identical to the input.
     Call the layer with `training=True` to adjust the brightness of the input.
 
-    **Note:** This layer is safe to use inside a `tf.data` pipeline
+    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
     (independently of which backend you're using).
 
     Args:
keras/src/layers/preprocessing/image_preprocessing/random_color_degeneration.py
@@ -13,6 +13,9 @@ class RandomColorDegeneration(BaseImagePreprocessingLayer):
     color. It then takes a weighted average between original image and the
     degenerated image. This makes colors appear more dull.
 
+    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
+    (independently of which backend you're using).
+
     Args:
         factor: A tuple of two floats or a single float.
             `factor` controls the extent to which the
keras/src/layers/preprocessing/image_preprocessing/random_color_jitter.py
@@ -16,6 +16,9 @@ class RandomColorJitter(BaseImagePreprocessingLayer):
     and hue image processing operation sequentially and randomly on the
     input.
 
+    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
+    (independently of which backend you're using).
+
     Args:
         value_range: the range of values the incoming images will have.
             Represented as a two number tuple written [low, high].
keras/src/layers/preprocessing/image_preprocessing/random_contrast.py
@@ -21,7 +21,7 @@ class RandomContrast(BaseImagePreprocessingLayer):
     in integer or floating point dtype.
     By default, the layer will output floats.
 
-    **Note:** This layer is safe to use inside a `tf.data` pipeline
+    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
     (independently of which backend you're using).
 
     Input shape:
keras/src/layers/preprocessing/image_preprocessing/random_crop.py
@@ -30,7 +30,7 @@ class RandomCrop(BaseImagePreprocessingLayer):
     of integer or floating point dtype. By default, the layer will output
     floats.
 
-    **Note:** This layer is safe to use inside a `tf.data` pipeline
+    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
     (independently of which backend you're using).
 
     Input shape:
keras/src/layers/preprocessing/image_preprocessing/random_elastic_transform.py
@@ -14,6 +14,9 @@ class RandomElasticTransform(BaseImagePreprocessingLayer):
     distortion is controlled by the `scale` parameter, while the `factor`
     determines the probability of applying the transformation.
 
+    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
+    (independently of which backend you're using).
+
     Args:
         factor: A single float or a tuple of two floats.
             `factor` controls the probability of applying the transformation.
keras/src/layers/preprocessing/image_preprocessing/random_erasing.py
@@ -13,6 +13,12 @@ class RandomErasing(BaseImagePreprocessingLayer):
     an image are erased (replaced by a constant value or noise)
     during training to improve generalization.
 
+    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
+    (independently of which backend you're using).
+
+    References:
+        - [Random Erasing paper](https://arxiv.org/abs/1708.04896).
+
     Args:
         factor: A single float or a tuple of two floats.
             `factor` controls the probability of applying the transformation.
@@ -35,9 +41,6 @@ class RandomErasing(BaseImagePreprocessingLayer):
             typically either `[0, 1]` or `[0, 255]` depending on how your
             preprocessing pipeline is set up.
         seed: Integer. Used to create a random seed.
-
-    References:
-        - [Random Erasing paper](https://arxiv.org/abs/1708.04896).
     """
 
     _USE_BASE_FACTOR = False
keras/src/layers/preprocessing/image_preprocessing/random_flip.py
@@ -27,7 +27,7 @@ class RandomFlip(BaseImagePreprocessingLayer):
     of integer or floating point dtype.
     By default, the layer will output floats.
 
-    **Note:** This layer is safe to use inside a `tf.data` pipeline
+    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
     (independently of which backend you're using).
 
     Input shape:
keras/src/layers/preprocessing/image_preprocessing/random_gaussian_blur.py
@@ -13,6 +13,9 @@ class RandomGaussianBlur(BaseImagePreprocessingLayer):
     randomly selected degree of blurring, controlled by the `factor` and
     `sigma` arguments.
 
+    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
+    (independently of which backend you're using).
+
     Args:
         factor: A single float or a tuple of two floats.
             `factor` controls the extent to which the image hue is impacted.
keras/src/layers/preprocessing/image_preprocessing/random_grayscale.py
@@ -19,7 +19,7 @@ class RandomGrayscale(BaseImagePreprocessingLayer):
     image using standard RGB to grayscale conversion coefficients. Images
     that are not selected for conversion remain unchanged.
 
-    **Note:** This layer is safe to use inside a `tf.data` pipeline
+    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
     (independently of which backend you're using).
 
     Args:
keras/src/layers/preprocessing/image_preprocessing/random_hue.py
@@ -14,6 +14,9 @@ class RandomHue(BaseImagePreprocessingLayer):
     The image hue is adjusted by converting the image(s) to HSV and rotating the
     hue channel (H) by delta. The image is then converted back to RGB.
 
+    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
+    (independently of which backend you're using).
+
     Args:
         factor: A single float or a tuple of two floats.
             `factor` controls the extent to which the
keras/src/layers/preprocessing/image_preprocessing/random_invert.py
@@ -14,6 +14,9 @@ class RandomInvert(BaseImagePreprocessingLayer):
     complementary values. Images that are not selected for inversion
     remain unchanged.
 
+    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
+    (independently of which backend you're using).
+
     Args:
         factor: A single float or a tuple of two floats.
             `factor` controls the probability of inverting the image colors.
keras/src/layers/preprocessing/image_preprocessing/random_perspective.py
@@ -20,6 +20,9 @@ class RandomPerspective(BaseImagePreprocessingLayer):
     corner points, simulating a 3D-like transformation. The amount of distortion
     is controlled by the `factor` and `scale` parameters.
 
+    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
+    (independently of which backend you're using).
+
     Args:
         factor: A float or a tuple of two floats.
             Represents the probability of applying the perspective
keras/src/layers/preprocessing/image_preprocessing/random_posterization.py
@@ -8,6 +8,9 @@ from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing
 class RandomPosterization(BaseImagePreprocessingLayer):
     """Reduces the number of bits for each color channel.
 
+    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
+    (independently of which backend you're using).
+
     References:
         - [AutoAugment: Learning Augmentation Policies from Data](https://arxiv.org/abs/1805.09501)
         - [RandAugment: Practical automated data augmentation with a reduced search space](https://arxiv.org/abs/1909.13719)
keras/src/layers/preprocessing/image_preprocessing/random_rotation.py
@@ -23,7 +23,7 @@ class RandomRotation(BaseImagePreprocessingLayer):
     of integer or floating point dtype.
     By default, the layer will output floats.
 
-    **Note:** This layer is safe to use inside a `tf.data` pipeline
+    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
     (independently of which backend you're using).
 
     Input shape:
keras/src/layers/preprocessing/image_preprocessing/random_saturation.py
@@ -13,6 +13,9 @@ class RandomSaturation(BaseImagePreprocessingLayer):
     This layer will randomly increase/reduce the saturation for the input RGB
     images.
 
+    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
+    (independently of which backend you're using).
+
     Args:
         factor: A tuple of two floats or a single float.
             `factor` controls the extent to which the image saturation
keras/src/layers/preprocessing/image_preprocessing/random_sharpness.py
@@ -13,6 +13,9 @@ class RandomSharpness(BaseImagePreprocessingLayer):
     original image and the processed image. This operation adjusts the clarity
     of the edges in an image, ranging from blurred to enhanced sharpness.
 
+    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
+    (independently of which backend you're using).
+
     Args:
         factor: A tuple of two floats or a single float.
             `factor` controls the extent to which the image sharpness
keras/src/layers/preprocessing/image_preprocessing/random_shear.py
@@ -23,6 +23,9 @@ class RandomShear(BaseImagePreprocessingLayer):
     regions created during the transformation are filled according to the
     `fill_mode` and `fill_value` parameters.
 
+    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
+    (independently of which backend you're using).
+
     Args:
         x_factor: A tuple of two floats. For each augmented image, a value
             is sampled from the provided range. If a float is passed, the
keras/src/layers/preprocessing/image_preprocessing/random_translation.py
@@ -23,6 +23,9 @@ class RandomTranslation(BaseImagePreprocessingLayer):
     of integer or floating point dtype. By default, the layer will output
     floats.
 
+    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
+    (independently of which backend you're using).
+
     Input shape:
         3D (unbatched) or 4D (batched) tensor with shape:
         `(..., height, width, channels)`, in `"channels_last"` format,
@@ -34,9 +37,6 @@ class RandomTranslation(BaseImagePreprocessingLayer):
         or `(..., channels, target_height, target_width)`,
         in `"channels_first"` format.
 
-    **Note:** This layer is safe to use inside a `tf.data` pipeline
-    (independently of which backend you're using).
-
     Args:
         height_factor: a float represented as fraction of value, or a tuple of
             size 2 representing lower and upper bound for shifting vertically. A
keras/src/layers/preprocessing/image_preprocessing/random_zoom.py
@@ -24,6 +24,9 @@ class RandomZoom(BaseImagePreprocessingLayer):
     of integer or floating point dtype.
     By default, the layer will output floats.
 
+    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
+    (independently of which backend you're using).
+
     Input shape:
         3D (unbatched) or 4D (batched) tensor with shape:
         `(..., height, width, channels)`, in `"channels_last"` format,
@@ -35,9 +38,6 @@ class RandomZoom(BaseImagePreprocessingLayer):
         or `(..., channels, target_height, target_width)`,
         in `"channels_first"` format.
 
-    **Note:** This layer is safe to use inside a `tf.data` pipeline
-    (independently of which backend you're using).
-
     Args:
         height_factor: a float represented as fraction of value, or a tuple of
             size 2 representing lower and upper bound for zooming vertically.
keras/src/layers/preprocessing/image_preprocessing/resizing.py
@@ -21,6 +21,9 @@ class Resizing(BaseImagePreprocessingLayer):
     format. Input pixel values can be of any range
     (e.g. `[0., 1.)` or `[0, 255]`).
 
+    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
+    (independently of which backend you're using).
+
     Input shape:
         3D (unbatched) or 4D (batched) tensor with shape:
         `(..., height, width, channels)`, in `"channels_last"` format,
@@ -32,9 +35,6 @@ class Resizing(BaseImagePreprocessingLayer):
         or `(..., channels, target_height, target_width)`,
         in `"channels_first"` format.
 
-    **Note:** This layer is safe to use inside a `tf.data` pipeline
-    (independently of which backend you're using).
-
     Args:
         height: Integer, the height of the output shape.
         width: Integer, the width of the output shape.
keras/src/layers/preprocessing/image_preprocessing/solarization.py
@@ -15,6 +15,9 @@ class Solarization(BaseImagePreprocessingLayer):
     to all values. When created with specified `threshold` the layer only
     augments pixels that are above the `threshold` value.
 
+    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
+    (independently of which backend you're using).
+
     Args:
         addition_factor: (Optional) A tuple of two floats or a single float,
             between 0 and 1.
keras/src/layers/preprocessing/mel_spectrogram.py
@@ -1,5 +1,5 @@
 from keras.src.api_export import keras_export
-from keras.src.layers.preprocessing.tf_data_layer import TFDataLayer
+from keras.src.layers.preprocessing.data_layer import DataLayer
 
 # mel spectrum constants.
 _MEL_BREAK_FREQUENCY_HERTZ = 700.0
@@ -7,7 +7,7 @@ _MEL_HIGH_FREQUENCY_Q = 1127.0
 
 
 @keras_export("keras.layers.MelSpectrogram")
-class MelSpectrogram(TFDataLayer):
+class MelSpectrogram(DataLayer):
     """A preprocessing layer to convert raw audio signals to Mel spectrograms.
 
     This layer takes `float32`/`float64` single or batched audio signal as
@@ -24,10 +24,37 @@ class MelSpectrogram(TFDataLayer):
     speech and music processing tasks like speech recognition, speaker
     identification, and music genre classification.
 
+    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
+    (independently of which backend you're using).
+
     References:
         - [Spectrogram](https://en.wikipedia.org/wiki/Spectrogram),
         - [Mel scale](https://en.wikipedia.org/wiki/Mel_scale).
 
+    Args:
+        fft_length: Integer, size of the FFT window.
+        sequence_stride: Integer, number of samples between successive STFT
+            columns.
+        sequence_length: Integer, size of the window used for applying
+            `window` to each audio frame. If `None`, defaults to `fft_length`.
+        window: String, name of the window function to use. Available values
+            are `"hann"` and `"hamming"`. If `window` is a tensor, it will be
+            used directly as the window and its length must be
+            `sequence_length`. If `window` is `None`, no windowing is
+            used. Defaults to `"hann"`.
+        sampling_rate: Integer, sample rate of the input signal.
+        num_mel_bins: Integer, number of mel bins to generate.
+        min_freq: Float, minimum frequency of the mel bins.
+        max_freq: Float, maximum frequency of the mel bins.
+            If `None`, defaults to `sampling_rate / 2`.
+        power_to_db: If True, convert the power spectrogram to decibels.
+        top_db: Float, minimum negative cut-off `max(10 * log10(S)) - top_db`.
+        mag_exp: Float, exponent for the magnitude spectrogram.
+            1 for magnitude, 2 for power, etc. Default is 2.
+        ref_power: Float, the power is scaled relative to it
+            `10 * log10(S / ref_power)`.
+        min_power: Float, minimum value for power and `ref_power`.
+
     Examples:
 
     **Unbatched audio signal**
@@ -55,29 +82,6 @@ class MelSpectrogram(TFDataLayer):
         2D (unbatched) or 3D (batched) tensor with
         shape:`(..., num_mel_bins, time)`.
 
-    Args:
-        fft_length: Integer, size of the FFT window.
-        sequence_stride: Integer, number of samples between successive STFT
-            columns.
-        sequence_length: Integer, size of the window used for applying
-            `window` to each audio frame. If `None`, defaults to `fft_length`.
-        window: String, name of the window function to use. Available values
-            are `"hann"` and `"hamming"`. If `window` is a tensor, it will be
-            used directly as the window and its length must be
-            `sequence_length`. If `window` is `None`, no windowing is
-            used. Defaults to `"hann"`.
-        sampling_rate: Integer, sample rate of the input signal.
-        num_mel_bins: Integer, number of mel bins to generate.
-        min_freq: Float, minimum frequency of the mel bins.
-        max_freq: Float, maximum frequency of the mel bins.
-            If `None`, defaults to `sampling_rate / 2`.
-        power_to_db: If True, convert the power spectrogram to decibels.
-        top_db: Float, minimum negative cut-off `max(10 * log10(S)) - top_db`.
-        mag_exp: Float, exponent for the magnitude spectrogram.
-            1 for magnitude, 2 for power, etc. Default is 2.
-        ref_power: Float, the power is scaled relative to it
-            `10 * log10(S / ref_power)`.
-        min_power: Float, minimum value for power and `ref_power`.
     """
 
     def __init__(
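The MelSpectrogram hunks above only move the `Args:` block ahead of the examples and add the pipeline note; the constructor signature itself is unchanged. For reference, a typical call using parameters documented in that block (values chosen arbitrarily):

```python
import numpy as np
import keras

# One second of fake audio at 16 kHz.
audio = np.random.uniform(-1.0, 1.0, size=(16000,)).astype("float32")

layer = keras.layers.MelSpectrogram(
    num_mel_bins=64,
    sampling_rate=16000,
    sequence_stride=256,
    fft_length=2048,
)
mel = layer(audio)  # shape: (num_mel_bins, time)
```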
keras/src/layers/preprocessing/normalization.py
@@ -5,12 +5,12 @@ import numpy as np
 from keras.src import backend
 from keras.src import ops
 from keras.src.api_export import keras_export
-from keras.src.layers.preprocessing.tf_data_layer import TFDataLayer
+from keras.src.layers.preprocessing.data_layer import DataLayer
 from keras.src.utils.module_utils import tensorflow as tf
 
 
 @keras_export("keras.layers.Normalization")
-class Normalization(TFDataLayer):
+class Normalization(DataLayer):
     """A preprocessing layer that normalizes continuous features.
 
     This layer will shift and scale inputs into a distribution centered around
@@ -23,6 +23,9 @@ class Normalization(TFDataLayer):
     variance of the data and store them as the layer's weights. `adapt()` should
     be called before `fit()`, `evaluate()`, or `predict()`.
 
+    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
+    (independently of which backend you're using).
+
     Args:
         axis: Integer, tuple of integers, or None. The axis or axes that should
             have a separate mean and variance for each index in the shape.
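Since the Normalization docstring now also carries the pipeline note, here is a quick reminder of the `adapt()` workflow it refers to (a minimal sketch):

```python
import numpy as np
import keras

adapt_data = np.array([1.0, 2.0, 3.0, 4.0, 5.0], dtype="float32")

layer = keras.layers.Normalization(axis=None)
layer.adapt(adapt_data)  # computes mean/variance and stores them as weights

normalized = layer(np.array([6.0, 7.0], dtype="float32"))
# roughly (x - 3.0) / sqrt(2), i.e. values around 2.12 and 2.83
```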
keras/src/layers/preprocessing/rescaling.py
@@ -1,11 +1,11 @@
 from keras.src import backend
 from keras.src.api_export import keras_export
-from keras.src.layers.preprocessing.tf_data_layer import TFDataLayer
+from keras.src.layers.preprocessing.data_layer import DataLayer
 from keras.src.saving import serialization_lib
 
 
 @keras_export("keras.layers.Rescaling")
-class Rescaling(TFDataLayer):
+class Rescaling(DataLayer):
     """A preprocessing layer which rescales input values to a new range.
 
     This layer rescales every value of an input (often an image) by multiplying
@@ -23,7 +23,7 @@ class Rescaling(TFDataLayer):
     of integer or floating point dtype, and by default the layer will output
     floats.
 
-    **Note:** This layer is safe to use inside a `tf.data` pipeline
+    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
     (independently of which backend you're using).
 
     Args:
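Rescaling gets the same `grain` mention; its canonical use is mapping `[0, 255]` images into `[0, 1]` or `[-1, 1]` (a minimal sketch):

```python
import numpy as np
import keras

images = np.random.randint(0, 256, size=(2, 32, 32, 3)).astype("uint8")

to_unit = keras.layers.Rescaling(scale=1.0 / 255)               # [0, 255] -> [0, 1]
to_sym = keras.layers.Rescaling(scale=1.0 / 127.5, offset=-1)   # [0, 255] -> [-1, 1]

unit = to_unit(images)
sym = to_sym(images)
```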
keras/src/layers/rnn/bidirectional.py
@@ -109,16 +109,16 @@ class Bidirectional(Layer):
         # Recreate the forward layer from the original layer config, so that it
         # will not carry over any state from the layer.
         config = serialization_lib.serialize_keras_object(layer)
-        config["config"]["name"] = "forward_" + utils.removeprefix(
-            layer.name, "forward_"
+        config["config"]["name"] = (
+            f"forward_{utils.removeprefix(layer.name, 'forward_')}"
         )
         self.forward_layer = serialization_lib.deserialize_keras_object(config)
 
         if backward_layer is None:
             config = serialization_lib.serialize_keras_object(layer)
             config["config"]["go_backwards"] = True
-            config["config"]["name"] = "backward_" + utils.removeprefix(
-                layer.name, "backward_"
+            config["config"]["name"] = (
+                f"backward_{utils.removeprefix(layer.name, 'backward_')}"
             )
             self.backward_layer = serialization_lib.deserialize_keras_object(
                 config