keras-nightly 3.12.0.dev2025081903-py3-none-any.whl → 3.12.0.dev2025082103-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -778,7 +778,7 @@ def _cudnn_gru(
     return (
         last_output,
         outputs,
-        state,
+        [state],
     )


@@ -176,6 +176,8 @@ class Attention(Layer):
             # Bias so padding positions do not contribute to attention
             # distribution. Note 65504. is the max float16 value.
             max_value = 65504.0 if scores.dtype == "float16" else 1.0e9
+            if len(padding_mask.shape) == 2:
+                padding_mask = ops.expand_dims(padding_mask, axis=-2)
             scores -= max_value * ops.cast(padding_mask, dtype=scores.dtype)

         weights = ops.softmax(scores, axis=-1)
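Illustrative sketch (not part of the published diff): the new branch lets a rank-2 `(batch, key_length)` boolean padding mask broadcast against rank-3 `(batch, query_length, key_length)` scores, using the public `keras.ops` API.

    from keras import ops

    padding_mask = ops.ones((2, 5), dtype="bool")               # (batch, key_length)
    if len(padding_mask.shape) == 2:
        padding_mask = ops.expand_dims(padding_mask, axis=-2)   # (batch, 1, key_length)
    print(padding_mask.shape)  # (2, 1, 5)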
@@ -167,14 +167,15 @@ class Lambda(Layer):
         )

     @staticmethod
-    def _raise_for_lambda_deserialization(arg_name, safe_mode):
+    def _raise_for_lambda_deserialization(safe_mode):
         if safe_mode:
             raise ValueError(
-                f"The `{arg_name}` of this `Lambda` layer is a Python lambda. "
-                "Deserializing it is unsafe. If you trust the source of the "
-                "config artifact, you can override this error "
-                "by passing `safe_mode=False` "
-                "to `from_config()`, or calling "
+                "Requested the deserialization of a `Lambda` layer whose "
+                "`function` is a Python lambda. This carries a potential risk "
+                "of arbitrary code execution and thus it is disallowed by "
+                "default. If you trust the source of the artifact, you can "
+                "override this error by passing `safe_mode=False` to the "
+                "loading function, or calling "
                 "`keras.config.enable_unsafe_deserialization()."
             )

@@ -187,7 +188,7 @@ class Lambda(Layer):
                 and "class_name" in fn_config
                 and fn_config["class_name"] == "__lambda__"
             ):
-                cls._raise_for_lambda_deserialization("function", safe_mode)
+                cls._raise_for_lambda_deserialization(safe_mode)
                 inner_config = fn_config["config"]
                 fn = python_utils.func_load(
                     inner_config["code"],
@@ -206,7 +207,7 @@ class Lambda(Layer):
                 and "class_name" in fn_config
                 and fn_config["class_name"] == "__lambda__"
             ):
-                cls._raise_for_lambda_deserialization("function", safe_mode)
+                cls._raise_for_lambda_deserialization(safe_mode)
                 inner_config = fn_config["config"]
                 fn = python_utils.func_load(
                     inner_config["code"],
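Illustrative sketch (not part of the published diff): after this refactor the error no longer names a specific argument; any `Lambda` whose `function` is a Python lambda is refused under the default safe mode. The round-trip below is a hedged example, assuming `Lambda.from_config` accepts `safe_mode` as in current Keras 3.

    import keras

    layer = keras.layers.Lambda(lambda x: x * 2)
    config = layer.get_config()

    # Under the default safe mode this raises the ValueError quoted above:
    # keras.layers.Lambda.from_config(config)

    # Explicit opt-in for trusted configs:
    restored = keras.layers.Lambda.from_config(config, safe_mode=False)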
@@ -11,6 +11,7 @@ from keras.src.legacy.saving import json_utils
 from keras.src.legacy.saving import saving_options
 from keras.src.legacy.saving import saving_utils
 from keras.src.saving import object_registration
+from keras.src.saving import serialization_lib
 from keras.src.utils import io_utils

 try:
@@ -72,7 +73,9 @@ def save_model_to_hdf5(model, filepath, overwrite=True, include_optimizer=True):
         f.close()


-def load_model_from_hdf5(filepath, custom_objects=None, compile=True):
+def load_model_from_hdf5(
+    filepath, custom_objects=None, compile=True, safe_mode=True
+):
     """Loads a model saved via `save_model_to_hdf5`.

     Args:
@@ -128,7 +131,9 @@ def load_model_from_hdf5(filepath, custom_objects=None, compile=True):
         model_config = model_config.decode("utf-8")
     model_config = json_utils.decode(model_config)

-    with saving_options.keras_option_scope(use_legacy_config=True):
+    legacy_scope = saving_options.keras_option_scope(use_legacy_config=True)
+    safe_mode_scope = serialization_lib.SafeModeScope(safe_mode)
+    with legacy_scope, safe_mode_scope:
         model = saving_utils.model_from_config(
             model_config, custom_objects=custom_objects
         )
@@ -1,4 +1,3 @@
-import json
 import threading

 from absl import logging
@@ -81,10 +80,6 @@ def model_from_config(config, custom_objects=None):
         function_dict["config"]["closure"] = function_config[2]
         config["config"]["function"] = function_dict

-    # TODO(nkovela): Swap find and replace args during Keras 3.0 release
-    # Replace keras refs with keras
-    config = _find_replace_nested_dict(config, "keras.", "keras.")
-
    return serialization.deserialize_keras_object(
        config,
        module_objects=MODULE_OBJECTS.ALL_OBJECTS,
@@ -231,13 +226,6 @@ def _deserialize_metric(metric_config):
     return metrics_module.deserialize(metric_config)


-def _find_replace_nested_dict(config, find, replace):
-    dict_str = json.dumps(config)
-    dict_str = dict_str.replace(find, replace)
-    config = json.loads(dict_str)
-    return config
-
-
 def _resolve_compile_arguments_compat(obj, obj_config, module):
     """Resolves backwards compatibility issues with training config arguments.

@@ -2,7 +2,6 @@

 import contextlib
 import inspect
-import json
 import threading
 import weakref

@@ -485,12 +484,6 @@ def deserialize_keras_object(
         arg_spec = inspect.getfullargspec(cls.from_config)
         custom_objects = custom_objects or {}

-        # TODO(nkovela): Swap find and replace args during Keras 3.0 release
-        # Replace keras refs with keras
-        cls_config = _find_replace_nested_dict(
-            cls_config, "keras.", "keras."
-        )
-
         if "custom_objects" in arg_spec.args:
             deserialized_obj = cls.from_config(
                 cls_config,
@@ -565,10 +558,3 @@ def validate_config(config):
 def is_default(method):
     """Check if a method is decorated with the `default` wrapper."""
     return getattr(method, "_is_default", False)
-
-
-def _find_replace_nested_dict(config, find, replace):
-    dict_str = json.dumps(config)
-    dict_str = dict_str.replace(find, replace)
-    config = json.loads(dict_str)
-    return config
@@ -194,7 +194,10 @@ def load_model(filepath, custom_objects=None, compile=True, safe_mode=True):
         )
     if str(filepath).endswith((".h5", ".hdf5")):
         return legacy_h5_format.load_model_from_hdf5(
-            filepath, custom_objects=custom_objects, compile=compile
+            filepath,
+            custom_objects=custom_objects,
+            compile=compile,
+            safe_mode=safe_mode,
         )
     elif str(filepath).endswith(".keras"):
         raise ValueError(
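Illustrative usage (not part of the published diff): `safe_mode` is now forwarded to the legacy HDF5 loader, so `.h5` files honor the same switch as `.keras` files. The file name below is a placeholder.

    import keras

    # Default safe_mode=True refuses to rebuild Python lambdas found in the file.
    model = keras.saving.load_model("legacy_model.h5")

    # Explicit opt-out, only for trusted artifacts:
    model = keras.saving.load_model("legacy_model.h5", safe_mode=False)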
@@ -656,12 +656,12 @@ def deserialize_keras_object(
         if config["class_name"] == "__lambda__":
             if safe_mode:
                 raise ValueError(
-                    "Requested the deserialization of a `lambda` object. "
-                    "This carries a potential risk of arbitrary code execution "
-                    "and thus it is disallowed by default. If you trust the "
-                    "source of the saved model, you can pass `safe_mode=False` to "
-                    "the loading function in order to allow `lambda` loading, "
-                    "or call `keras.config.enable_unsafe_deserialization()`."
+                    "Requested the deserialization of a Python lambda. This "
+                    "carries a potential risk of arbitrary code execution and thus "
+                    "it is disallowed by default. If you trust the source of the "
+                    "artifact, you can override this error by passing "
+                    "`safe_mode=False` to the loading function, or calling "
+                    "`keras.config.enable_unsafe_deserialization()."
                 )
             return python_utils.func_load(inner_config["value"])
         if tf is not None and config["class_name"] == "__typespec__":
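For reference (illustrative, not part of the published diff), the global opt-out named in the reworded error message; the model path is a placeholder.

    import keras

    keras.config.enable_unsafe_deserialization()
    model = keras.saving.load_model("model_with_lambda.keras")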
@@ -411,7 +411,7 @@ def paths_and_labels_to_dataset(
     """Constructs a fixed-size dataset of audio and labels."""
     path_ds = tf.data.Dataset.from_tensor_slices(file_paths)
     if label_mode:
-        label_ds = dataset_utils.labels_to_dataset(
+        label_ds = dataset_utils.labels_to_dataset_tf(
             labels, label_mode, num_classes
         )
         ds = tf.data.Dataset.zip((path_ds, label_ds))
@@ -8,7 +8,9 @@ import numpy as np

 from keras.src import tree
 from keras.src.api_export import keras_export
+from keras.src.utils import file_utils
 from keras.src.utils import io_utils
+from keras.src.utils.module_utils import grain
 from keras.src.utils.module_utils import tensorflow as tf


@@ -299,6 +301,17 @@ def is_torch_dataset(dataset):
     return False


+def is_grain_dataset(dataset):
+    if hasattr(dataset, "__class__"):
+        for parent in dataset.__class__.__mro__:
+            if parent.__name__ in (
+                "MapDataset",
+                "IterDataset",
+            ) and str(parent.__module__).startswith("grain._src.python"):
+                return True
+    return False
+
+
 def _rescale_dataset_split_sizes(left_size, right_size, total_length):
     """Rescale the dataset split sizes.

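Illustrative sketch (not part of the published diff): the new helper classifies grain datasets by walking the MRO for `MapDataset`/`IterDataset` classes defined under `grain._src.python`. A hedged example, assuming the `grain` package is installed:

    import grain
    from keras.src.utils.dataset_utils import is_grain_dataset

    ds = grain.MapDataset.source([1, 2, 3])
    print(is_grain_dataset(ds))  # True for grain MapDataset/IterDataset subclasses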
@@ -476,6 +489,10 @@ def _get_type_spec(dataset):
         from torch.utils.data import Dataset as TorchDataset

         return TorchDataset
+    elif is_grain_dataset(dataset):
+        from grain import MapDataset
+
+        return MapDataset
     else:
         return None

@@ -525,10 +542,17 @@ def index_directory(
     - class_names: names of the classes corresponding to these labels, in
         order.
     """
+    if file_utils.is_remote_path(directory):
+        os_module = tf.io.gfile
+        path_module = tf.io.gfile
+    else:
+        os_module = os
+        path_module = os.path
+
     if labels == "inferred":
         subdirs = []
-        for subdir in sorted(tf.io.gfile.listdir(directory)):
-            if tf.io.gfile.isdir(tf.io.gfile.join(directory, subdir)):
+        for subdir in sorted(os_module.listdir(directory)):
+            if path_module.isdir(path_module.join(directory, subdir)):
                 if not subdir.startswith("."):
                     if subdir.endswith("/"):
                         subdir = subdir[:-1]
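Illustrative sketch (not part of the published diff): `file_utils.is_remote_path` drives the dispatch above, so local directories are indexed with plain `os`/`os.path` and only remote paths go through `tf.io.gfile`. The paths are placeholders.

    from keras.src.utils import file_utils

    file_utils.is_remote_path("gs://my-bucket/images")  # True  -> tf.io.gfile
    file_utils.is_remote_path("/tmp/images")            # False -> os / os.path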
@@ -566,7 +590,7 @@ def index_directory(
     results = []
     filenames = []

-    for dirpath in (tf.io.gfile.join(directory, subdir) for subdir in subdirs):
+    for dirpath in (path_module.join(directory, subdir) for subdir in subdirs):
         results.append(
             pool.apply_async(
                 index_subdirectory,
@@ -608,7 +632,7 @@ def index_directory(
         )
     pool.close()
     pool.join()
-    file_paths = [tf.io.gfile.join(directory, fname) for fname in filenames]
+    file_paths = [path_module.join(directory, fname) for fname in filenames]

     if shuffle:
         # Shuffle globally to erase macro-structure
@@ -623,8 +647,10 @@ def index_directory(


 def iter_valid_files(directory, follow_links, formats):
+    io_module = tf.io.gfile if file_utils.is_remote_path(directory) else os
+
     if not follow_links:
-        walk = tf.io.gfile.walk(directory)
+        walk = io_module.walk(directory)
     else:
         walk = os.walk(directory, followlinks=follow_links)
     for root, _, files in sorted(walk, key=lambda x: x[0]):
@@ -648,14 +674,18 @@ def index_subdirectory(directory, class_indices, follow_links, formats):
         paths, and `labels` is a list of integer labels corresponding
         to these files.
     """
+    path_module = (
+        tf.io.gfile if file_utils.is_remote_path(directory) else os.path
+    )
+
     dirname = os.path.basename(directory)
     valid_files = iter_valid_files(directory, follow_links, formats)
     labels = []
     filenames = []
     for root, fname in valid_files:
         labels.append(class_indices[dirname])
-        absolute_path = tf.io.gfile.join(root, fname)
-        relative_path = tf.io.gfile.join(
+        absolute_path = path_module.join(root, fname)
+        relative_path = path_module.join(
             dirname, os.path.relpath(absolute_path, directory)
         )
         filenames.append(relative_path)
@@ -700,7 +730,7 @@ def get_training_or_validation_split(samples, labels, validation_split, subset):
     return samples, labels


-def labels_to_dataset(labels, label_mode, num_classes):
+def labels_to_dataset_tf(labels, label_mode, num_classes):
     """Create a `tf.data.Dataset` from the list/tuple of labels.

     Args:
@@ -730,6 +760,51 @@ def labels_to_dataset(labels, label_mode, num_classes):
     return label_ds


+def labels_to_dataset_grain(labels, label_mode, num_classes):
+    """Create a `grain.MapDataset` from the list/tuple of labels.
+
+    Args:
+        labels: list/tuple of labels to be converted into a `grain.MapDataset`.
+        label_mode: String describing the encoding of `labels`. Options are:
+            - `"binary"` indicates that the labels (there can be only 2) are encoded
+              as `float32` scalars with values 0 or 1
+              (e.g. for `binary_crossentropy`).
+            - `"categorical"` means that the labels are mapped into a categorical
+              vector. (e.g. for `categorical_crossentropy` loss).
+        num_classes: number of classes of labels.
+
+    Returns:
+        A `grain.MapDataset` instance.
+    """
+    from keras.src import backend
+    from keras.src import ops
+
+    if label_mode not in ("binary", "categorical", "int"):
+        raise ValueError(
+            f"Invalid `label_mode`: {label_mode}. "
+            "Expected one of: 'binary', 'categorical', 'int'."
+        )
+
+    def preprocess_labels_in_cpu(label_mode, x, num_classes):
+        with backend.device_scope("cpu"):
+            if label_mode == "binary":
+                return ops.expand_dims(
+                    ops.convert_to_tensor(x, dtype="float32"), axis=-1
+                )
+            elif label_mode == "categorical":
+                return ops.one_hot(
+                    ops.convert_to_tensor(x, dtype="int32"), num_classes
+                )
+            else:
+                return ops.convert_to_tensor(x, dtype="int32")
+
+    label_ds = grain.MapDataset.source(labels)
+    label_ds = label_ds.map(
+        lambda x: preprocess_labels_in_cpu(label_mode, x, num_classes),
+    )
+    return label_ds
+
+
 def check_validation_split_arg(validation_split, subset, shuffle, seed):
     """Raise errors in case of invalid argument values.

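A hedged usage sketch of the new helper (not part of the published diff), assuming grain is installed:

    from keras.src.utils.dataset_utils import labels_to_dataset_grain

    label_ds = labels_to_dataset_grain([0, 1, 1], "categorical", num_classes=2)
    print(label_ds[0])  # one-hot vector for the first label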
@@ -0,0 +1,33 @@
+from keras.src import backend
+from keras.src import tree
+
+
+def make_batch(values):
+    from keras.src import ops
+
+    if not values:
+        raise ValueError("Cannot batch 0 values. Please file a bug.")
+
+    with backend.device_scope("cpu"):
+        return tree.map_structure(lambda *xs: ops.stack(xs), *values)
+
+
+def make_string_batch(values):
+    from keras.src import ops
+
+    if not values:
+        raise ValueError("Cannot batch 0 values. Please file a bug.")
+
+    def batch_fn(*xs):
+        if isinstance(xs[0], str):
+            if backend.backend() == "tensorflow":
+                import tensorflow as tf
+
+                xs = [tf.convert_to_tensor(x, dtype=tf.string) for x in xs]
+                xs = tf.stack(xs)
+            return xs
+        else:
+            return ops.stack(xs)
+
+    with backend.device_scope("cpu"):
+        return tree.map_structure(batch_fn, *values)
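Illustrative sketch (not part of the published diff): `make_batch` stacks per-example (possibly nested) structures on CPU; the shapes below are made up for the example.

    import numpy as np
    from keras.src.utils.grain_utils import make_batch

    examples = [np.zeros((4,), "float32"), np.ones((4,), "float32")]
    batch = make_batch(examples)
    print(batch.shape)  # (2, 4)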
@@ -1,11 +1,27 @@
+import io
+import pathlib
+
 import numpy as np

 from keras.src.api_export import keras_export
 from keras.src.backend.config import standardize_data_format
 from keras.src.utils import dataset_utils
 from keras.src.utils import image_utils
+from keras.src.utils.grain_utils import make_batch
+from keras.src.utils.module_utils import grain
 from keras.src.utils.module_utils import tensorflow as tf

+try:
+    from PIL import Image as pil_image
+
+    try:
+        pil_image_resampling = pil_image.Resampling
+    except AttributeError:
+        pil_image_resampling = pil_image
+except ImportError:
+    pil_image = None
+    pil_image_resampling = None
+
 ALLOWLIST_FORMATS = (".bmp", ".gif", ".jpeg", ".jpg", ".png")


@@ -32,9 +48,10 @@ def image_dataset_from_directory(
     crop_to_aspect_ratio=False,
     pad_to_aspect_ratio=False,
     data_format=None,
+    format="tf",
     verbose=True,
 ):
-    """Generates a `tf.data.Dataset` from image files in a directory.
+    """Generates a dataset from image files in a directory.

     If your directory structure is:

@@ -49,13 +66,17 @@ def image_dataset_from_directory(
     ```

     Then calling `image_dataset_from_directory(main_directory,
-    labels='inferred')` will return a `tf.data.Dataset` that yields batches of
+    labels='inferred')` will return a dataset that yields batches of
     images from the subdirectories `class_a` and `class_b`, together with labels
     0 and 1 (0 corresponding to `class_a` and 1 corresponding to `class_b`).

     Supported image formats: `.jpeg`, `.jpg`, `.png`, `.bmp`, `.gif`.
     Animated gifs are truncated to the first frame.

+    By default, this function will return a `tf.data.Dataset` object. You can
+    set `format="grain"` to return a `grain.IterDataset` object instead, which
+    removes the TensorFlow dependency.
+
     Args:
         directory: Directory where the data is located.
             If `labels` is `"inferred"`, it should contain
@@ -125,12 +146,19 @@ def image_dataset_from_directory(
             preserved.
         data_format: If None uses keras.config.image_data_format()
             otherwise either 'channel_last' or 'channel_first'.
+        format: The format of the return object. Defaults to `"tf"`. Available
+            options are:
+            - `"tf"`: returns a `tf.data.Dataset` object. Requires
+                TensorFlow to be installed.
+            - `"grain"`: returns a `grain.IterDataset` object. Requires
+                Grain to be installed.
         verbose: Whether to display number information on classes and
             number of files found. Defaults to `True`.

     Returns:

-    A `tf.data.Dataset` object.
+    A `tf.data.Dataset` (`format="tf"`) or `grain.IterDataset`
+    (`format="grain"`) object.

     - If `label_mode` is `None`, it yields `float32` tensors of shape
         `(batch_size, image_size[0], image_size[1], num_channels)`,
@@ -222,6 +250,11 @@ def image_dataset_from_directory(
             f"{supported_interpolations}. "
             f"Received: interpolation={interpolation}"
         )
+    if format not in ("tf", "grain"):
+        raise ValueError(
+            '`format` should be either "tf" or "grain". '
+            f"Received: format={format}"
+        )

     dataset_utils.check_validation_split_arg(
         validation_split, subset, shuffle, seed
@@ -289,6 +322,7 @@ def image_dataset_from_directory(
             shuffle=shuffle,
             shuffle_buffer_size=shuffle_buffer_size,
             seed=seed,
+            format=format,
         )

         val_dataset = paths_and_labels_to_dataset(
@@ -303,14 +337,23 @@ def image_dataset_from_directory(
             pad_to_aspect_ratio=pad_to_aspect_ratio,
             data_format=data_format,
             shuffle=False,
+            format=format,
         )

-        if batch_size is not None:
-            train_dataset = train_dataset.batch(batch_size)
-            val_dataset = val_dataset.batch(batch_size)
-
-        train_dataset = train_dataset.prefetch(tf.data.AUTOTUNE)
-        val_dataset = val_dataset.prefetch(tf.data.AUTOTUNE)
+        if format == "tf":
+            if batch_size is not None:
+                train_dataset = train_dataset.batch(batch_size)
+                val_dataset = val_dataset.batch(batch_size)
+            train_dataset = train_dataset.prefetch(tf.data.AUTOTUNE)
+            val_dataset = val_dataset.prefetch(tf.data.AUTOTUNE)
+        else:
+            train_dataset = train_dataset.to_iter_dataset()
+            val_dataset = val_dataset.to_iter_dataset()
+            if batch_size is not None:
+                train_dataset = train_dataset.batch(
+                    batch_size, batch_fn=make_batch
+                )
+                val_dataset = val_dataset.batch(batch_size, batch_fn=make_batch)

         # Users may need to reference `class_names`.
         train_dataset.class_names = class_names
@@ -345,12 +388,18 @@ def image_dataset_from_directory(
             shuffle=shuffle,
             shuffle_buffer_size=shuffle_buffer_size,
             seed=seed,
+            format=format,
         )

-        if batch_size is not None:
-            dataset = dataset.batch(batch_size)
+        if format == "tf":
+            if batch_size is not None:
+                dataset = dataset.batch(batch_size)
+            dataset = dataset.prefetch(tf.data.AUTOTUNE)
+        else:
+            dataset = dataset.to_iter_dataset()
+            if batch_size is not None:
+                dataset = dataset.batch(batch_size, batch_fn=make_batch)

-        dataset = dataset.prefetch(tf.data.AUTOTUNE)
         # Users may need to reference `class_names`.
         dataset.class_names = class_names

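Illustrative call (not part of the published diff) showing the new option; the directory path is a placeholder and grain must be installed for `format="grain"`.

    import keras

    train_ds = keras.utils.image_dataset_from_directory(
        "path/to/images",
        image_size=(64, 64),
        batch_size=32,
        format="grain",  # grain.IterDataset instead of tf.data.Dataset
    )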
@@ -374,11 +423,66 @@ def paths_and_labels_to_dataset(
     shuffle=False,
     shuffle_buffer_size=None,
     seed=None,
+    format="tf",
+):
+    """Constructs a dataset of images and labels."""
+    if format == "tf":
+        return _paths_and_labels_to_dataset_tf(
+            image_paths=image_paths,
+            image_size=image_size,
+            num_channels=num_channels,
+            labels=labels,
+            label_mode=label_mode,
+            num_classes=num_classes,
+            interpolation=interpolation,
+            data_format=data_format,
+            crop_to_aspect_ratio=crop_to_aspect_ratio,
+            pad_to_aspect_ratio=pad_to_aspect_ratio,
+            shuffle=shuffle,
+            shuffle_buffer_size=shuffle_buffer_size,
+            seed=seed,
+        )
+    elif format == "grain":
+        return _paths_and_labels_to_dataset_grain(
+            image_paths=image_paths,
+            image_size=image_size,
+            num_channels=num_channels,
+            labels=labels,
+            label_mode=label_mode,
+            num_classes=num_classes,
+            interpolation=interpolation,
+            data_format=data_format,
+            crop_to_aspect_ratio=crop_to_aspect_ratio,
+            pad_to_aspect_ratio=pad_to_aspect_ratio,
+            shuffle=shuffle,
+            seed=seed,
+        )
+    else:
+        raise ValueError(
+            '`format` should be either "tf" or "grain". '
+            f"Received: format={format}"
+        )
+
+
+def _paths_and_labels_to_dataset_tf(
+    image_paths,
+    image_size,
+    num_channels,
+    labels,
+    label_mode,
+    num_classes,
+    interpolation,
+    data_format,
+    crop_to_aspect_ratio=False,
+    pad_to_aspect_ratio=False,
+    shuffle=False,
+    shuffle_buffer_size=None,
+    seed=None,
 ):
     """Constructs a dataset of images and labels."""
     path_ds = tf.data.Dataset.from_tensor_slices(image_paths)
     if label_mode:
-        label_ds = dataset_utils.labels_to_dataset(
+        label_ds = dataset_utils.labels_to_dataset_tf(
             labels, label_mode, num_classes
         )
         ds = tf.data.Dataset.zip((path_ds, label_ds))
@@ -398,17 +502,18 @@ def paths_and_labels_to_dataset(
     )
     if label_mode:
         ds = ds.map(
-            lambda x, y: (load_image(x, *args), y),
+            lambda x, y: (_load_image_tf(x, *args), y),
             num_parallel_calls=tf.data.AUTOTUNE,
         )
     else:
         ds = ds.map(
-            lambda x: load_image(x, *args), num_parallel_calls=tf.data.AUTOTUNE
+            lambda x: _load_image_tf(x, *args),
+            num_parallel_calls=tf.data.AUTOTUNE,
         )
     return ds


-def load_image(
+def _load_image_tf(
     path,
     image_size,
     num_channels,
@@ -457,3 +562,120 @@ def load_image(
     else:
         img.set_shape((num_channels, image_size[0], image_size[1]))
     return img
+
+
+def _paths_and_labels_to_dataset_grain(
+    image_paths,
+    image_size,
+    num_channels,
+    labels,
+    label_mode,
+    num_classes,
+    interpolation,
+    data_format,
+    crop_to_aspect_ratio=False,
+    pad_to_aspect_ratio=False,
+    shuffle=False,
+    seed=None,
+):
+    """Constructs a dataset of images and labels."""
+    path_ds = grain.MapDataset.source(image_paths)
+    if label_mode:
+        label_ds = dataset_utils.labels_to_dataset_grain(
+            labels, label_mode, num_classes
+        )
+        ds = grain.experimental.ZipMapDataset([path_ds, label_ds])
+    else:
+        ds = path_ds
+
+    if shuffle:
+        ds = ds.shuffle(seed=seed)
+
+    args = (
+        image_size,
+        num_channels,
+        interpolation,
+        data_format,
+        crop_to_aspect_ratio,
+        pad_to_aspect_ratio,
+    )
+    if label_mode:
+        ds = ds.map(lambda data: (_load_image_grain(data[0], *args), data[1]))
+    else:
+        ds = ds.map(lambda x: _load_image_grain(x, *args))
+
+    return ds
+
+
+def _load_image_grain(
+    path,
+    image_size,
+    num_channels,
+    interpolation,
+    data_format,
+    crop_to_aspect_ratio=False,
+    pad_to_aspect_ratio=False,
+):
+    """Load an image from a path and resize it."""
+    from keras.src import backend
+    from keras.src import ops
+
+    if pil_image is None:
+        raise ImportError(
+            "Could not import PIL.Image. The use of `load_img` requires PIL."
+        )
+    if pad_to_aspect_ratio and crop_to_aspect_ratio:
+        raise ValueError(
+            "Only one of `pad_to_aspect_ratio`, `crop_to_aspect_ratio`"
+            " can be set to `True`."
+        )
+
+    if isinstance(path, io.BytesIO):
+        img = pil_image.open(path)
+    elif isinstance(path, (pathlib.Path, bytes, str)):
+        if isinstance(path, pathlib.Path):
+            path = str(path.resolve())
+        img = pil_image.open(path)
+    else:
+        raise TypeError(
+            f"path should be path-like or io.BytesIO, not {type(path)}"
+        )
+    if num_channels == 1:
+        # if image is not already an 8-bit, 16-bit or 32-bit grayscale image
+        # convert it to an 8-bit grayscale image.
+        if img.mode not in ("L", "I;16", "I"):
+            img = img.convert("L")
+    elif num_channels == 4:
+        if img.mode != "RGBA":
+            img = img.convert("RGBA")
+    elif num_channels == 3:
+        if img.mode != "RGB":
+            img = img.convert("RGB")
+    else:
+        raise ValueError(
+            "num_channels must be 1, 3 or 4. "
+            f"Received: num_channels={num_channels}"
+        )
+
+    with backend.device_scope("cpu"):
+        img = ops.convert_to_tensor(np.array(img), dtype="float32")
+        if len(img.shape) == 2:
+            # If the image is grayscale, expand dims to add channel axis.
+            # The reason is that `ops.image.resize` expects 3D or 4D tensors.
+            img = ops.expand_dims(img, axis=-1)
+        if data_format == "channels_first":
+            img = ops.transpose(img, (2, 0, 1))
+        img = ops.image.resize(
+            img,
+            size=image_size,
+            interpolation=interpolation,
+            crop_to_aspect_ratio=crop_to_aspect_ratio,
+            pad_to_aspect_ratio=pad_to_aspect_ratio,
+            data_format=data_format,
+        )
+    if backend.backend() == "tensorflow":
+        if data_format == "channels_last":
+            img.set_shape((image_size[0], image_size[1], num_channels))
+        else:
+            img.set_shape((num_channels, image_size[0], image_size[1]))
+    return img
@@ -58,3 +58,4 @@ torch_xla = LazyModule(
 optree = LazyModule("optree")
 dmtree = LazyModule("tree")
 tf2onnx = LazyModule("tf2onnx")
+grain = LazyModule("grain")
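For context (illustrative, not part of the published diff): `LazyModule` defers the import, so grain remains an optional dependency; touching the handle triggers the real import and raises an ImportError with install instructions if grain is missing.

    from keras.src.utils.module_utils import grain

    ds = grain.MapDataset.source([1, 2, 3])  # imports grain on first attribute access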
@@ -2,6 +2,8 @@ import numpy as np

 from keras.src.api_export import keras_export
 from keras.src.utils import dataset_utils
+from keras.src.utils.grain_utils import make_string_batch
+from keras.src.utils.module_utils import grain
 from keras.src.utils.module_utils import tensorflow as tf


@@ -23,9 +25,10 @@ def text_dataset_from_directory(
     validation_split=None,
     subset=None,
     follow_links=False,
+    format="tf",
     verbose=True,
 ):
-    """Generates a `tf.data.Dataset` from text files in a directory.
+    """Generates a dataset from text files in a directory.

     If your directory structure is:

@@ -40,12 +43,16 @@ def text_dataset_from_directory(
     ```

     Then calling `text_dataset_from_directory(main_directory,
-    labels='inferred')` will return a `tf.data.Dataset` that yields batches of
+    labels='inferred')` will return a dataset that yields batches of
     texts from the subdirectories `class_a` and `class_b`, together with labels
     0 and 1 (0 corresponding to `class_a` and 1 corresponding to `class_b`).

     Only `.txt` files are supported at this time.

+    By default, this function will return a `tf.data.Dataset` object. You can
+    set `format="grain"` to return a `grain.IterDataset` object instead, which
+    removes the TensorFlow dependency.
+
     Args:
         directory: Directory where the data is located.
             If `labels` is `"inferred"`, it should contain
@@ -91,19 +98,34 @@ def text_dataset_from_directory(
             (the training and validation datasets respectively).
         follow_links: Whether to visits subdirectories pointed to by symlinks.
             Defaults to `False`.
+        format: The format of the return object. Defaults to `"tf"`. Available
+            options are:
+            - `"tf"`: returns a `tf.data.Dataset` object. Requires
+                TensorFlow to be installed.
+            - `"grain"`: returns a `grain.IterDataset` object. Requires
+                Grain to be installed.
         verbose: Whether to display number information on classes and
             number of files found. Defaults to `True`.

     Returns:

-    A `tf.data.Dataset` object.
+    A `tf.data.Dataset` (`format="tf"`) or `grain.IterDataset`
+    (`format="grain"`) object.

+    When `format="tf"`:
     - If `label_mode` is `None`, it yields `string` tensors of shape
         `(batch_size,)`, containing the contents of a batch of text files.
     - Otherwise, it yields a tuple `(texts, labels)`, where `texts`
         has shape `(batch_size,)` and `labels` follows the format described
         below.

+    When `format="grain"`:
+    - If `label_mode` is `None`, it yields a list of Python strings containing
+        the contents of a batch of text files.
+    - Otherwise, it yields a tuple `(texts, labels)`, where `texts`
+        is a list of Python strings and `labels` follows the format described
+        below.
+
     Rules regarding labels format:

     - if `label_mode` is `int`, the labels are an `int32` tensor of shape
@@ -137,6 +159,11 @@ def text_dataset_from_directory(
             '"categorical", "binary", '
             f"or None. Received: label_mode={label_mode}"
         )
+    if format not in ("tf", "grain"):
+        raise ValueError(
+            '`format` should be either "tf" or "grain". '
+            f"Received: format={format}"
+        )
     if labels is None or label_mode is None:
         labels = None
         label_mode = None
@@ -199,6 +226,7 @@ def text_dataset_from_directory(
             shuffle=shuffle,
             shuffle_buffer_size=shuffle_buffer_size,
             seed=seed,
+            format=format,
         )
         val_dataset = paths_and_labels_to_dataset(
             file_paths=file_paths_val,
@@ -207,14 +235,25 @@ def text_dataset_from_directory(
             num_classes=len(class_names) if class_names else 0,
             max_length=max_length,
             shuffle=False,
+            format=format,
         )

-        if batch_size is not None:
-            train_dataset = train_dataset.batch(batch_size)
-            val_dataset = val_dataset.batch(batch_size)
-
-        train_dataset = train_dataset.prefetch(tf.data.AUTOTUNE)
-        val_dataset = val_dataset.prefetch(tf.data.AUTOTUNE)
+        if format == "tf":
+            if batch_size is not None:
+                train_dataset = train_dataset.batch(batch_size)
+                val_dataset = val_dataset.batch(batch_size)
+            train_dataset = train_dataset.prefetch(tf.data.AUTOTUNE)
+            val_dataset = val_dataset.prefetch(tf.data.AUTOTUNE)
+        else:
+            train_dataset = train_dataset.to_iter_dataset()
+            val_dataset = val_dataset.to_iter_dataset()
+            if batch_size is not None:
+                train_dataset = train_dataset.batch(
+                    batch_size, batch_fn=make_string_batch
+                )
+                val_dataset = val_dataset.batch(
+                    batch_size, batch_fn=make_string_batch
+                )

         # Users may need to reference `class_names`.
         train_dataset.class_names = class_names
@@ -238,10 +277,17 @@ def text_dataset_from_directory(
             shuffle=shuffle,
             shuffle_buffer_size=shuffle_buffer_size,
             seed=seed,
+            format=format,
         )
-        if batch_size is not None:
-            dataset = dataset.batch(batch_size)
-        dataset = dataset.prefetch(tf.data.AUTOTUNE)
+
+        if format == "tf":
+            if batch_size is not None:
+                dataset = dataset.batch(batch_size)
+            dataset = dataset.prefetch(tf.data.AUTOTUNE)
+        else:
+            dataset = dataset.to_iter_dataset()
+            if batch_size is not None:
+                dataset = dataset.batch(batch_size, batch_fn=make_string_batch)

         # Users may need to reference `class_names`.
         dataset.class_names = class_names
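Illustrative call (not part of the published diff): the same opt-in for text files; with `format="grain"`, batches of Python strings are assembled by `make_string_batch`. The directory path is a placeholder.

    import keras

    train_ds = keras.utils.text_dataset_from_directory(
        "path/to/texts",
        batch_size=8,
        format="grain",
    )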
@@ -257,11 +303,47 @@ def paths_and_labels_to_dataset(
     shuffle=False,
     shuffle_buffer_size=None,
     seed=None,
+    format="tf",
+):
+    """Constructs a dataset of text strings and labels."""
+    if format == "tf":
+        return _paths_and_labels_to_dataset_tf(
+            file_paths,
+            labels,
+            label_mode,
+            num_classes,
+            max_length,
+            shuffle,
+            shuffle_buffer_size,
+            seed,
+        )
+    elif format == "grain":
+        return _paths_and_labels_to_dataset_grain(
+            file_paths,
+            labels,
+            label_mode,
+            num_classes,
+            max_length,
+            shuffle,
+            shuffle_buffer_size,
+            seed,
+        )
+
+
+def _paths_and_labels_to_dataset_tf(
+    file_paths,
+    labels,
+    label_mode,
+    num_classes,
+    max_length,
+    shuffle=False,
+    shuffle_buffer_size=None,
+    seed=None,
 ):
     """Constructs a dataset of text strings and labels."""
     path_ds = tf.data.Dataset.from_tensor_slices(file_paths)
     if label_mode:
-        label_ds = dataset_utils.labels_to_dataset(
+        label_ds = dataset_utils.labels_to_dataset_tf(
             labels, label_mode, num_classes
         )
         ds = tf.data.Dataset.zip((path_ds, label_ds))
@@ -273,19 +355,62 @@ def paths_and_labels_to_dataset(

     if label_mode:
         ds = ds.map(
-            lambda x, y: (path_to_string_content(x, max_length), y),
+            lambda x, y: (_path_to_string_content_tf(x, max_length), y),
             num_parallel_calls=tf.data.AUTOTUNE,
         )
     else:
         ds = ds.map(
-            lambda x: path_to_string_content(x, max_length),
+            lambda x: _path_to_string_content_tf(x, max_length),
             num_parallel_calls=tf.data.AUTOTUNE,
         )
     return ds


-def path_to_string_content(path, max_length):
+def _path_to_string_content_tf(path, max_length):
     txt = tf.io.read_file(path)
     if max_length is not None:
         txt = tf.strings.substr(txt, 0, max_length)
     return txt
+
+
+def _paths_and_labels_to_dataset_grain(
+    file_paths,
+    labels,
+    label_mode,
+    num_classes,
+    max_length,
+    shuffle=False,
+    shuffle_buffer_size=None,
+    seed=None,
+):
+    """Constructs a dataset of text strings and labels."""
+    path_ds = grain.MapDataset.source(file_paths)
+    if label_mode:
+        label_ds = dataset_utils.labels_to_dataset_grain(
+            labels, label_mode, num_classes
+        )
+        ds = grain.experimental.ZipMapDataset([path_ds, label_ds])
+    else:
+        ds = path_ds
+
+    if shuffle:
+        ds = ds.shuffle(seed=seed)
+
+    if label_mode:
+        ds = ds.map(
+            lambda data: (
+                _path_to_string_content_grain(data[0], max_length),
+                data[1],
+            ),
+        )
+    else:
+        ds = ds.map(lambda x: _path_to_string_content_grain(x, max_length))
+    return ds
+
+
+def _path_to_string_content_grain(path, max_length):
+    with open(path, "r") as f:
+        txt = f.read()
+    if max_length is not None:
+        txt = txt[:max_length]
+    return txt
@@ -172,10 +172,10 @@ class TorchModuleWrapper(Layer):
                     "Requested the deserialization of a `torch.nn.Module` "
                     "object via `torch.load()`. This carries a potential risk "
                     "of arbitrary code execution and thus it is disallowed by "
-                    "default. If you trust the source of the saved model, you "
-                    "can pass `safe_mode=False` to the loading function in "
-                    "order to allow `torch.nn.Module` loading, or call "
-                    "`keras.config.enable_unsafe_deserialization()`."
+                    "default. If you trust the source of the artifact, you can "
+                    "override this error by passing `safe_mode=False` to the "
+                    "loading function, or calling "
+                    "`keras.config.enable_unsafe_deserialization()."
                 )

             # Decode the base64 string back to bytes
keras/src/version.py CHANGED
@@ -1,7 +1,7 @@
 from keras.src.api_export import keras_export

 # Unique source of truth for the version number.
-__version__ = "3.12.0.dev2025081903"
+__version__ = "3.12.0.dev2025082103"


 @keras_export("keras.version")
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: keras-nightly
-Version: 3.12.0.dev2025081903
+Version: 3.12.0.dev2025082103
 Summary: Multi-backend Keras
 Author-email: Keras team <keras-users@googlegroups.com>
 License: Apache License 2.0
@@ -126,7 +126,7 @@ keras/regularizers/__init__.py,sha256=542Shphw7W8h4Dyf2rmqMKUECVZ8IVBvN9g1LWhz-b
 keras/saving/__init__.py,sha256=KvL2GZxjvgFgEhvEnkvqjIR9JSNHKz-NWZacXajsjLI,1298
 keras/src/__init__.py,sha256=Gi4S7EiCMkE03PbdGNpFdaUYySWDs_FcAJ8Taz9Y1BE,684
 keras/src/api_export.py,sha256=gXOkBOnmscV013WAc75lc4Up01-Kkg9EylIAT_QWctg,1173
-keras/src/version.py,sha256=jIhkFIrF_efXcsntiwlP3rpnKtudZLu6Cq5H26XJLBA,204
+keras/src/version.py,sha256=3WxaFuZhA9X3LMhCWXM1PlE7M9PhUGBXga0hrf2P5Qk,204
 keras/src/activations/__init__.py,sha256=0nL3IFDB9unlrMz8ninKOWo-uCHasTUpTo1tXZb2u44,4433
 keras/src/activations/activations.py,sha256=mogPggtp4CGldI3VOPNmesRxp6EbiR1_i4KLGaVwzL8,17614
 keras/src/applications/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -212,7 +212,7 @@ keras/src/backend/tensorflow/nn.py,sha256=oS7sngoA2C2SFfKQdYWvSZe7HCFfG29t4glbE6
 keras/src/backend/tensorflow/numpy.py,sha256=BWBku9PEiyx3NAcWyccHS_hqu4EzVmNTjrBtMPfSb5U,94514
 keras/src/backend/tensorflow/optimizer.py,sha256=kFlyEOnGjEYdLpd8mpwhUeku78__xBfZbbrDWpJrq60,9307
 keras/src/backend/tensorflow/random.py,sha256=iO8V_soaDXZm9ewyAVbjudhsMj08C348c9Bz64nxXC4,6475
-keras/src/backend/tensorflow/rnn.py,sha256=SwKOW9j4CYcSYmrlm1vYK34xU0TcVgBcz52fRUT50aM,34600
+keras/src/backend/tensorflow/rnn.py,sha256=99EJqbPdWddmG14zyjjhUZfU5zo9ObmslF_Mak7EmAs,34602
 keras/src/backend/tensorflow/sparse.py,sha256=a_FZcJY-wPl1x4vY0T7j-GORa4SAuMjNEToJLmK0daQ,32247
 keras/src/backend/tensorflow/tensorboard.py,sha256=e7pXicuMfQjuCmq1wOmixWhWt2EbjLMBo_JPAqCbZRk,504
 keras/src/backend/tensorflow/trackable.py,sha256=QZn0JvpBJ7Kx4e6zM2IVIWz9ADcWDB-dHN6vjoQBa9Q,1993
@@ -298,7 +298,7 @@ keras/src/layers/activations/relu.py,sha256=LYtWg_ZpdOEp3YxylsCVdLz00hTgqd0OyFrP
 keras/src/layers/activations/softmax.py,sha256=HR2FtPzw-vnZAFh4uiF_gksewComHd7z31rwJtCdTCU,2611
 keras/src/layers/attention/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras/src/layers/attention/additive_attention.py,sha256=J8joGgCdS4k0tuu8GeDIdabTyJXQk_-JnHgoYABsiGc,4309
-keras/src/layers/attention/attention.py,sha256=8kuzrc5yt_YFC7s0oQQBcdoozBzr8bZLWEDdOIYVZPg,13468
+keras/src/layers/attention/attention.py,sha256=1wNlC3fma0ZPb2bS50d6bQ_HB_EuL1UYecAy4wh5sKM,13583
 keras/src/layers/attention/grouped_query_attention.py,sha256=gJSirCzubfSiNHUeFBvuXVIHDg21fLQSv7u-FPAIoxo,21018
 keras/src/layers/attention/multi_head_attention.py,sha256=rZmUOvjzwxdF1oopAvlNjkWmUSQ5pLu4zob4xGWeRIM,31934
 keras/src/layers/convolutional/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -322,7 +322,7 @@ keras/src/layers/core/einsum_dense.py,sha256=yK0ng8wPl17UJP_8LMOPM_R08CFSDrNhWo5
 keras/src/layers/core/embedding.py,sha256=5y5tvjtOOoxucTRevQGxVkJE3Fn4g03aBNodB3wF7Zg,17144
 keras/src/layers/core/identity.py,sha256=o0gLHlXL7eNJEbXIgIsgBsZX97K6jN9n3qPXprkXQ9Y,848
 keras/src/layers/core/input_layer.py,sha256=_CLTG6fxGf4FQ6rx0taxHUG5g0okzErWDF1JAgg5ctw,8129
-keras/src/layers/core/lambda_layer.py,sha256=kTtaYYA7cI9obKpDdJ-o9Oi1gEaEfbaMjcYHU9egx1I,9195
+keras/src/layers/core/lambda_layer.py,sha256=Wplek4hOwh_rwXz4_bpz0pXzKe26ywz52glh5uD0l4w,9272
 keras/src/layers/core/masking.py,sha256=g-RrZ_P50Surh_KGlZQwy2kPNLsop0F8voU4SG2MQkw,2856
 keras/src/layers/core/wrapper.py,sha256=KIdDBuk24V9rAn97-HUUKQ0JMx9Eyd0q9W4qQFaYNt8,1509
 keras/src/layers/merging/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -455,10 +455,10 @@ keras/src/legacy/preprocessing/sequence.py,sha256=jyot2KR3652vRxuzmLkWjRd5MivMys
 keras/src/legacy/preprocessing/text.py,sha256=1NCgRIVZhZoWPSv0GKPGZ2r0D6SvcnHQsLpvFSnVals,11103
 keras/src/legacy/saving/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras/src/legacy/saving/json_utils.py,sha256=JIGZu1OJylkP71N6h3IBLoG_e9qnCQAC9H4GdDdUIOc,7296
-keras/src/legacy/saving/legacy_h5_format.py,sha256=1STGZkmKwVFjo7VEf5lzPo9despXD7270xi2Ouf6ArA,22901
+keras/src/legacy/saving/legacy_h5_format.py,sha256=7Jyrrzlsn8UPe58DVwYbMJEBCMr6uKNcOKB5WobWyuk,23092
 keras/src/legacy/saving/saving_options.py,sha256=ZUyOHYsTf0rBLBAOlSaeqVNv9tGjWA9LsNyPk5WTXRI,485
-keras/src/legacy/saving/saving_utils.py,sha256=qpqnUJrJ_kkiuhLAAqXKuFuWTU5SBGzCCy_5x56LqaI,9407
-keras/src/legacy/saving/serialization.py,sha256=s4qrdywzIRnMccfXRmxbSqqfquQyohIIf7TdjRQCsBc,21808
+keras/src/legacy/saving/saving_utils.py,sha256=8Sa2rmBGnTv86Tix20OgwF5vTLTpUYbfGdgHNSnrB30,9029
+keras/src/legacy/saving/serialization.py,sha256=hiwqO3Il861pkfm0Egaeph2XbhOlQQobmZjbZZgK32c,21368
 keras/src/losses/__init__.py,sha256=rt63Ye0f7YdAR0eV0EOj2J61DI6xNdp2ojonx6rB3wE,6595
 keras/src/losses/loss.py,sha256=BjtYoghA3jfpJ4_bG7c3NRK3rk7omzMSCuK9ZNlaYGs,8787
 keras/src/losses/losses.py,sha256=lVAuX3K4IzeRVvjvnejlieiuxtPRMvXtvmCrLZGsT9s,99534
@@ -523,9 +523,9 @@ keras/src/saving/__init__.py,sha256=vnrtfvnzW7Gwtxe5COhaMoEnVYB5iDe2YlqJ-DvqFIk,
 keras/src/saving/file_editor.py,sha256=SVrhhqQTF_ANd_hSRIgfM2vCqKBtvSyUaUuI8uuhGms,28976
 keras/src/saving/keras_saveable.py,sha256=aGIt1ajtsaamfUq18LM6ql8JEoQzi3HwzJEuwQ9bmKE,1285
 keras/src/saving/object_registration.py,sha256=aZmmFrJP5GjjNpLNmq4k6D-PqdAH8PMBGk7BXI7eogE,7358
-keras/src/saving/saving_api.py,sha256=KlTXFSZzGUnG0vvEZdEOsuRH5SvoV15K3_lryYfif7s,11597
+keras/src/saving/saving_api.py,sha256=hYMr0g_4zboDHUA4Dop7PVSPsGB0FBN7d29W4RhNPNI,11655
 keras/src/saving/saving_lib.py,sha256=-4Gsv9fd2ZK_arAiaDOTmO-yROsfk8ZpyTZGnk2hcxc,58711
-keras/src/saving/serialization_lib.py,sha256=qtonRiLraW742tlzulMebA0ptpbFvqvxkd94Ru6TO-w,30391
+keras/src/saving/serialization_lib.py,sha256=NRavrwwrUU5NG0dI8G-x8tHrH55liNJvecg2_y1CAuM,30379
 keras/src/testing/__init__.py,sha256=xOZf-VBOf3wrXu47PgII2TNfXgxUse60HCinBryHiK8,266
 keras/src/testing/test_case.py,sha256=YFQYAG-EH-FP70bWLzYP3IG3kDjLc2lvoWJ67mHLohQ,30844
 keras/src/testing/test_utils.py,sha256=6Vb8tJIyjU1ay63w3jvXNNhh7sSNrosQll4ii1NXELQ,6197
@@ -549,20 +549,21 @@ keras/src/tree/optree_impl.py,sha256=ZLEbRqE6u76miYgiRQc3eRc82_-xMxIZd3wBNQHeJds
 keras/src/tree/tree_api.py,sha256=cSOp6EMOe8p0DUIbbvELrzIjABTIYX0Fw7CBfqi8pcY,14093
 keras/src/utils/__init__.py,sha256=WSmTldk6M-XV0X84XR5vryg0BTR8KsTfxNIyRaNkqq0,1423
 keras/src/utils/argument_validation.py,sha256=uRFoLNJu3L2J8CM8L7uXGqhYi7ji8whh0H8nSHuRUXg,2876
-keras/src/utils/audio_dataset_utils.py,sha256=pxg3jOHgZMFhEkuJmCjI-dcrFyv7OlHyWW-49eedKN0,15114
+keras/src/utils/audio_dataset_utils.py,sha256=oxZuYM2LEKtCIyJLYr3ifqlA-Gmr4uzQynPOm6Eh3sU,15117
 keras/src/utils/backend_utils.py,sha256=U3oXPiuuWBjXs0uFBLf5vpyBYE44Mja-5A1YFxRuzlM,5278
 keras/src/utils/code_stats.py,sha256=1h4ifpAH5Jezm8BVrKM_WyzcG9uxrUiyzP1kcS4uqlo,1442
 keras/src/utils/config.py,sha256=3VhENVcng0DeazR-5rvjSnW_sovvOw-skEP-t3xWCEY,4643
-keras/src/utils/dataset_utils.py,sha256=k-g4akwOZyXHIq2DNlN8DwWCYSMYWZIYMdQJ3PwZ1IU,28194
+keras/src/utils/dataset_utils.py,sha256=JQJvGMrt69hwJO3eP-FR9oTw7HvCdBoZb0sx_kdEJws,30775
 keras/src/utils/dtype_utils.py,sha256=wL_WaWYoDzDDmQW6EQGdpBb9O5QJ9OaEJsvY0Mir4uc,1483
 keras/src/utils/file_utils.py,sha256=kylM3-5YZl9kwkYZIfBR0FIwFPRJjZnNbmhmx2RGNrY,17368
-keras/src/utils/image_dataset_utils.py,sha256=doL8q0q4DciFnlO-IyKN1v2Emh_gP4sI2rDhgeKL5qs,16964
+keras/src/utils/grain_utils.py,sha256=Wfwv12E3UrNZjJjTEk2JVV6_YEUav35UJ6bV1UAPEIk,886
+keras/src/utils/image_dataset_utils.py,sha256=0lOzD1CiXwZOe1wW-5uvFKuIgot9PWUC9KJJA0NVuP8,24017
 keras/src/utils/image_utils.py,sha256=HUI7Zcgqvsmm8a1xwfMwr7pOhnG4lsChP8Owv-xlCTM,16703
 keras/src/utils/io_utils.py,sha256=2u6b1SEQmkxo4IRPkKBedAkKuRIQMF4CdD-B5ko0Cek,4432
 keras/src/utils/jax_layer.py,sha256=h4MVRADUAL8t8pHaTKPYt81rxUuW6BfE4MnmQp5ETM0,27189
 keras/src/utils/jax_utils.py,sha256=vY3P4S9mfWEjdirLd81ocKqeCm-UVfgQ1yTi6UHdBiM,322
 keras/src/utils/model_visualization.py,sha256=I2NWeo-g0NpCM88HXMDyAbpvF_KIZuJr5hfOLGm922U,17799
-keras/src/utils/module_utils.py,sha256=ej6YM6I9dTapJvuhZsA7lvp1wxbKzbzI8T8abjLuxLA,1948
+keras/src/utils/module_utils.py,sha256=j3YV_MkoRffRXtJSEjbbrZ3JVdau657JEb4eNKVrmHg,1976
 keras/src/utils/naming.py,sha256=bPowKBlgiVP_6XtVlNVHxrxheKuJy2c0e-oEM8ocZQY,1776
 keras/src/utils/numerical_utils.py,sha256=Uqe5nu1HXmiZuh5-MznomtDSVSO9FgFaltdDtGnN61o,7205
 keras/src/utils/progbar.py,sha256=HWvgFustRG5WqsiIayaaSiUU2jOYkioEqbQdywmBm0c,10469
@@ -570,10 +571,10 @@ keras/src/utils/python_utils.py,sha256=j8d1oA6oEnU5J0xosWU3t9wIGiblj67OStEn7KJ7j
 keras/src/utils/rng_utils.py,sha256=NVk7Aavt8A1945YpBIGW18OPebo29g2qHgWZvkgRkW8,2168
 keras/src/utils/sequence_utils.py,sha256=CveyJ5VM5KJ4pFlo6LWT9omzd_xDeMRjTgczIKekP3Y,4716
 keras/src/utils/summary_utils.py,sha256=jjbTB6NTqMniSWXPKeNY6dvpn-U37WJdwqdfl8uX5nI,15447
-keras/src/utils/text_dataset_utils.py,sha256=JUqDauTec6uRZs71SbKeVjxHx_CNqqOWkoXQ1Q7ldRs,10701
+keras/src/utils/text_dataset_utils.py,sha256=6ACAHwEhjjd5rjfzwLl7Es2qkvmSBUWs5IYQLGrHFrQ,14543
 keras/src/utils/tf_utils.py,sha256=FTunWC5kdyjsK0TyxQxiHGaYNaAyUxhMX52Zee_Rz9c,4930
 keras/src/utils/timeseries_dataset_utils.py,sha256=rVxSuqlYLpzw_dVo8Ym5HSE2jFmndS8MAv4Uewycojo,9842
-keras/src/utils/torch_utils.py,sha256=y8vRSx_R9MtC1IpRqyK6LBRETxwrlUFnsu45R4_-7Hs,6638
+keras/src/utils/torch_utils.py,sha256=n0CAb7NFnK3CcfxY9VgA2kcZp_8SU05Ddg-KY0-qnoc,6619
 keras/src/utils/traceback_utils.py,sha256=VI8VJ8QjTDc3-cx3xfR9H7g68D2KVH7VknHi_JrVMuU,8997
 keras/src/utils/tracking.py,sha256=mVig-TS5LZbModoyAOnN3msazudKggW62hxUq4XzT2I,8844
 keras/src/visualization/__init__.py,sha256=bDdV3eLKeLKoUwUDBFuZxMO560OyFZND0zBn8vaG6rg,111
@@ -592,7 +593,7 @@ keras/utils/bounding_boxes/__init__.py,sha256=jtvQll4u8ZY0Z96HwNhP1nxWEG9FM3gI-6
 keras/utils/legacy/__init__.py,sha256=oSYZz6uS8UxSElRaaJYWJEoweJ4GAasZjnn7fNaOlog,342
 keras/visualization/__init__.py,sha256=UKWmiy6sps4SWlmQi9WX8_Z53cPpLlphz2zIeHdwJpQ,722
 keras/wrappers/__init__.py,sha256=QkS-O5K8qGS7C3sytF8MpmO6PasATpNVGF8qtb7Ojsw,407
-keras_nightly-3.12.0.dev2025081903.dist-info/METADATA,sha256=wXQRJnT0-m8WG318qVx3MY6zCsQHLF2RlmBwjrcMcPQ,5970
-keras_nightly-3.12.0.dev2025081903.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-keras_nightly-3.12.0.dev2025081903.dist-info/top_level.txt,sha256=ptcw_-QuGZ4ZDjMdwi_Z0clZm8QAqFdvzzFnDEOTs9o,6
-keras_nightly-3.12.0.dev2025081903.dist-info/RECORD,,
+keras_nightly-3.12.0.dev2025082103.dist-info/METADATA,sha256=5k0b73CF7vZyxjf3tDo_vwQc2tqEFc9nNqyeY9C-ROM,5970
+keras_nightly-3.12.0.dev2025082103.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+keras_nightly-3.12.0.dev2025082103.dist-info/top_level.txt,sha256=ptcw_-QuGZ4ZDjMdwi_Z0clZm8QAqFdvzzFnDEOTs9o,6
+keras_nightly-3.12.0.dev2025082103.dist-info/RECORD,,