keras_nightly-3.12.0.dev2025081804-py3-none-any.whl → keras_nightly-3.12.0.dev2025082003-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
keras/src/backend/jax/numpy.py CHANGED
@@ -39,17 +39,17 @@ def add(x1, x2):
 
 def bartlett(x):
     x = convert_to_tensor(x)
-    return jnp.bartlett(x)
+    return cast(jnp.bartlett(x), config.floatx())
 
 
 def hamming(x):
     x = convert_to_tensor(x)
-    return jnp.hamming(x)
+    return cast(jnp.hamming(x), config.floatx())
 
 
 def hanning(x):
     x = convert_to_tensor(x)
-    return jnp.hanning(x)
+    return cast(jnp.hanning(x), config.floatx())
 
 
 def heaviside(x1, x2):
@@ -60,7 +60,7 @@ def heaviside(x1, x2):
 
 def kaiser(x, beta):
     x = convert_to_tensor(x)
-    return jnp.kaiser(x, beta)
+    return cast(jnp.kaiser(x, beta), config.floatx())
 
 
 def bincount(x, weights=None, minlength=0, sparse=False):
@@ -497,7 +497,7 @@ def right_shift(x, y):
 
 def blackman(x):
     x = convert_to_tensor(x)
-    return jnp.blackman(x)
+    return cast(jnp.blackman(x), config.floatx())
 
 
 def broadcast_to(x, shape):
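The window-function changes above cast the JAX output to the Keras default float dtype. A hedged standalone sketch of the effect, using plain `jax.numpy` and the public `keras.config.floatx()` API rather than the backend-internal `cast`/`config` helpers:

```python
import jax.numpy as jnp
import keras

def bartlett_window(n):
    # Mirrors the change above: without the cast, the window dtype follows
    # JAX's defaults (e.g. float64 when x64 is enabled) instead of floatx().
    return jnp.bartlett(n).astype(keras.config.floatx())

print(bartlett_window(5).dtype)  # float32 when keras.config.floatx() == "float32"
```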
keras/src/backend/openvino/core.py CHANGED
@@ -843,9 +843,16 @@ def slice(inputs, start_indices, shape):
     start = ov_opset.concat(start, axis=0).output(0)
     stop = ov_opset.concat(stop, axis=0).output(0)
     axes = ov_opset.constant(axes, Type.i32).output(0)
-    return OpenVINOKerasTensor(
-        ov_opset.slice(inputs, start, stop, step, axes).output(0)
-    )
+    result = ov_opset.slice(inputs, start, stop, step, axes).output(0)
+
+    # Apply reshape to ensure output matches expected shape
+    # Convert None (dynamic) dimensions to -1 for OpenVINO compatibility
+    if all(dim is None or (isinstance(dim, int) and dim >= 0) for dim in shape):
+        reshape_pattern = [(-1 if dim is None else dim) for dim in shape]
+        target_shape = ov_opset.constant(reshape_pattern, Type.i32).output(0)
+        result = ov_opset.reshape(result, target_shape, False).output(0)
+
+    return OpenVINOKerasTensor(result)
 
 
 def slice_update(inputs, start_indices, updates):
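The OpenVINO `slice` change only reshapes the result when every target dimension is static or `None`. A minimal standalone sketch of that pattern conversion, kept separate from the OpenVINO opset calls so it runs anywhere:

```python
def to_reshape_pattern(shape):
    # None (dynamic) dims become -1, which the reshape treats as "infer";
    # any other non-static dim means the reshape is skipped entirely.
    if not all(d is None or (isinstance(d, int) and d >= 0) for d in shape):
        return None
    return [-1 if d is None else d for d in shape]

print(to_reshape_pattern((None, 3, 4)))  # [-1, 3, 4]
print(to_reshape_pattern((2, -2)))       # None -> keep the raw slice output
```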
keras/src/metrics/reduction_metrics.py CHANGED
@@ -201,10 +201,9 @@ class MeanMetricWrapper(Mean):
     def update_state(self, y_true, y_pred, sample_weight=None):
         mask = backend.get_keras_mask(y_pred)
         values = self._fn(y_true, y_pred, **self._fn_kwargs)
-        if sample_weight is not None and mask is not None:
-            sample_weight = losses.loss.apply_mask(
-                sample_weight, mask, dtype=self.dtype, reduction="sum"
-            )
+        sample_weight = losses.loss.apply_mask(
+            sample_weight, mask, dtype=self.dtype, reduction="sum"
+        )
         return super().update_state(values, sample_weight=sample_weight)
 
     def get_config(self):
keras/src/ops/core.py CHANGED
@@ -8,6 +8,7 @@ from keras.src.backend import KerasTensor
 from keras.src.backend import any_symbolic_tensors
 from keras.src.backend.common.backend_utils import slice_along_axis
 from keras.src.ops.operation import Operation
+from keras.src.saving import serialization_lib
 from keras.src.utils import traceback_utils
 
 
@@ -1105,6 +1106,19 @@ class VectorizedMap(Operation):
         y = tree.map_structure(append_batch_axis, y)
         return y
 
+    def get_config(self):
+        config = super().get_config()
+        config.update({"function": self.function})
+        return config
+
+    @classmethod
+    def from_config(cls, config):
+        config = config.copy()
+        config["function"] = serialization_lib.deserialize_keras_object(
+            config["function"]
+        )
+        return cls(**config)
+
 
 @keras_export("keras.ops.vectorized_map")
 def vectorized_map(function, elements):
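The new `get_config`/`from_config` pair on `VectorizedMap` leans on Keras object serialization to round-trip the wrapped callable. A hedged sketch of that round trip, using a built-in activation so the lookup resolves without custom-object registration (user-defined functions generally need `keras.saving.register_keras_serializable`):

```python
import keras
from keras.src.saving import serialization_lib

cfg = {"function": serialization_lib.serialize_keras_object(keras.activations.relu)}
fn = serialization_lib.deserialize_keras_object(cfg["function"])
print(fn(2.0))  # 2.0 (as a backend tensor)
```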
keras/src/utils/audio_dataset_utils.py CHANGED
@@ -411,7 +411,7 @@ def paths_and_labels_to_dataset(
     """Constructs a fixed-size dataset of audio and labels."""
     path_ds = tf.data.Dataset.from_tensor_slices(file_paths)
     if label_mode:
-        label_ds = dataset_utils.labels_to_dataset(
+        label_ds = dataset_utils.labels_to_dataset_tf(
             labels, label_mode, num_classes
         )
         ds = tf.data.Dataset.zip((path_ds, label_ds))
keras/src/utils/dataset_utils.py CHANGED
@@ -8,7 +8,9 @@ import numpy as np
 
 from keras.src import tree
 from keras.src.api_export import keras_export
+from keras.src.utils import file_utils
 from keras.src.utils import io_utils
+from keras.src.utils.module_utils import grain
 from keras.src.utils.module_utils import tensorflow as tf
 
 
@@ -299,6 +301,17 @@ def is_torch_dataset(dataset):
     return False
 
 
+def is_grain_dataset(dataset):
+    if hasattr(dataset, "__class__"):
+        for parent in dataset.__class__.__mro__:
+            if parent.__name__ in (
+                "MapDataset",
+                "IterDataset",
+            ) and str(parent.__module__).startswith("grain._src.python"):
+                return True
+    return False
+
+
 def _rescale_dataset_split_sizes(left_size, right_size, total_length):
     """Rescale the dataset split sizes.
 
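A minimal sketch of the MRO-based detection in the new `is_grain_dataset`, using a stand-in class so it runs without Grain installed; `grain._src.python` is the module prefix the real check looks for:

```python
class _FakeMapDataset:
    pass

# Pretend this class comes from Grain's internals (assumption for the demo).
_FakeMapDataset.__name__ = "MapDataset"
_FakeMapDataset.__module__ = "grain._src.python.dataset.dataset"

def looks_like_grain_dataset(dataset):
    for parent in type(dataset).__mro__:
        if parent.__name__ in ("MapDataset", "IterDataset") and str(
            parent.__module__
        ).startswith("grain._src.python"):
            return True
    return False

print(looks_like_grain_dataset(_FakeMapDataset()))  # True
print(looks_like_grain_dataset([1, 2, 3]))          # False
```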
@@ -476,6 +489,10 @@ def _get_type_spec(dataset):
         from torch.utils.data import Dataset as TorchDataset
 
         return TorchDataset
+    elif is_grain_dataset(dataset):
+        from grain import MapDataset
+
+        return MapDataset
     else:
         return None
 
@@ -525,10 +542,17 @@ def index_directory(
         - class_names: names of the classes corresponding to these labels, in
           order.
     """
+    if file_utils.is_remote_path(directory):
+        os_module = tf.io.gfile
+        path_module = tf.io.gfile
+    else:
+        os_module = os
+        path_module = os.path
+
     if labels == "inferred":
         subdirs = []
-        for subdir in sorted(tf.io.gfile.listdir(directory)):
-            if tf.io.gfile.isdir(tf.io.gfile.join(directory, subdir)):
+        for subdir in sorted(os_module.listdir(directory)):
+            if path_module.isdir(path_module.join(directory, subdir)):
                 if not subdir.startswith("."):
                     if subdir.endswith("/"):
                         subdir = subdir[:-1]
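`index_directory` now picks its filesystem helpers based on whether the directory is remote. A minimal sketch of that dispatch (the remote check and the gfile object are passed in so the snippet runs without TensorFlow):

```python
import os

def pick_fs_modules(directory, is_remote_path, gfile):
    if is_remote_path(directory):
        return gfile, gfile   # listdir/isdir/join all live on tf.io.gfile
    return os, os.path        # listdir on os, isdir/join on os.path

os_module, path_module = pick_fs_modules("/tmp/data", lambda p: "://" in p, None)
print(path_module.join("/tmp/data", "class_a"))  # /tmp/data/class_a
```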
@@ -566,7 +590,7 @@ def index_directory(
     results = []
     filenames = []
 
-    for dirpath in (tf.io.gfile.join(directory, subdir) for subdir in subdirs):
+    for dirpath in (path_module.join(directory, subdir) for subdir in subdirs):
         results.append(
             pool.apply_async(
                 index_subdirectory,
@@ -608,7 +632,7 @@ def index_directory(
         )
     pool.close()
     pool.join()
-    file_paths = [tf.io.gfile.join(directory, fname) for fname in filenames]
+    file_paths = [path_module.join(directory, fname) for fname in filenames]
 
     if shuffle:
         # Shuffle globally to erase macro-structure
@@ -623,8 +647,10 @@
 
 
 def iter_valid_files(directory, follow_links, formats):
+    io_module = tf.io.gfile if file_utils.is_remote_path(directory) else os
+
     if not follow_links:
-        walk = tf.io.gfile.walk(directory)
+        walk = io_module.walk(directory)
     else:
         walk = os.walk(directory, followlinks=follow_links)
     for root, _, files in sorted(walk, key=lambda x: x[0]):
@@ -648,14 +674,18 @@ def index_subdirectory(directory, class_indices, follow_links, formats):
         paths, and `labels` is a list of integer labels corresponding
         to these files.
     """
+    path_module = (
+        tf.io.gfile if file_utils.is_remote_path(directory) else os.path
+    )
+
     dirname = os.path.basename(directory)
     valid_files = iter_valid_files(directory, follow_links, formats)
     labels = []
     filenames = []
     for root, fname in valid_files:
         labels.append(class_indices[dirname])
-        absolute_path = tf.io.gfile.join(root, fname)
-        relative_path = tf.io.gfile.join(
+        absolute_path = path_module.join(root, fname)
+        relative_path = path_module.join(
             dirname, os.path.relpath(absolute_path, directory)
         )
         filenames.append(relative_path)
@@ -700,7 +730,7 @@ def get_training_or_validation_split(samples, labels, validation_split, subset):
     return samples, labels
 
 
-def labels_to_dataset(labels, label_mode, num_classes):
+def labels_to_dataset_tf(labels, label_mode, num_classes):
     """Create a `tf.data.Dataset` from the list/tuple of labels.
 
     Args:
@@ -730,6 +760,51 @@ def labels_to_dataset(labels, label_mode, num_classes):
     return label_ds
 
 
+def labels_to_dataset_grain(labels, label_mode, num_classes):
+    """Create a `grain.MapDataset` from the list/tuple of labels.
+
+    Args:
+        labels: list/tuple of labels to be converted into a `grain.MapDataset`.
+        label_mode: String describing the encoding of `labels`. Options are:
+        - `"binary"` indicates that the labels (there can be only 2) are encoded
+          as `float32` scalars with values 0 or 1
+          (e.g. for `binary_crossentropy`).
+        - `"categorical"` means that the labels are mapped into a categorical
+          vector. (e.g. for `categorical_crossentropy` loss).
+        num_classes: number of classes of labels.
+
+    Returns:
+        A `grain.MapDataset` instance.
+    """
+    from keras.src import backend
+    from keras.src import ops
+
+    if label_mode not in ("binary", "categorical", "int"):
+        raise ValueError(
+            f"Invalid `label_mode`: {label_mode}. "
+            "Expected one of: 'binary', 'categorical', 'int'."
+        )
+
+    def preprocess_labels_in_cpu(label_mode, x, num_classes):
+        with backend.device_scope("cpu"):
+            if label_mode == "binary":
+                return ops.expand_dims(
+                    ops.convert_to_tensor(x, dtype="float32"), axis=-1
+                )
+            elif label_mode == "categorical":
+                return ops.one_hot(
+                    ops.convert_to_tensor(x, dtype="int32"), num_classes
+                )
+            else:
+                return ops.convert_to_tensor(x, dtype="int32")
+
+    label_ds = grain.MapDataset.source(labels)
+    label_ds = label_ds.map(
+        lambda x: preprocess_labels_in_cpu(label_mode, x, num_classes),
+    )
+    return label_ds
+
+
 def check_validation_split_arg(validation_split, subset, shuffle, seed):
     """Raise errors in case of invalid argument values.
 
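Hedged usage sketch of the new Grain label pipeline, assuming Grain is installed; it mirrors the `"categorical"` branch of `labels_to_dataset_grain` above:

```python
import grain

from keras.src import backend
from keras.src import ops

labels = [0, 2, 1]
num_classes = 3

def to_categorical(x):
    # Same idea as preprocess_labels_in_cpu: build labels on CPU.
    with backend.device_scope("cpu"):
        return ops.one_hot(ops.convert_to_tensor(x, dtype="int32"), num_classes)

label_ds = grain.MapDataset.source(labels).map(to_categorical)
print(list(label_ds)[0])  # one-hot vector for class 0
```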
keras/src/utils/grain_utils.py ADDED
@@ -0,0 +1,33 @@
+from keras.src import backend
+from keras.src import tree
+
+
+def make_batch(values):
+    from keras.src import ops
+
+    if not values:
+        raise ValueError("Cannot batch 0 values. Please file a bug.")
+
+    with backend.device_scope("cpu"):
+        return tree.map_structure(lambda *xs: ops.stack(xs), *values)
+
+
+def make_string_batch(values):
+    from keras.src import ops
+
+    if not values:
+        raise ValueError("Cannot batch 0 values. Please file a bug.")
+
+    def batch_fn(*xs):
+        if isinstance(xs[0], str):
+            if backend.backend() == "tensorflow":
+                import tensorflow as tf
+
+                xs = [tf.convert_to_tensor(x, dtype=tf.string) for x in xs]
+                xs = tf.stack(xs)
+            return xs
+        else:
+            return ops.stack(xs)
+
+    with backend.device_scope("cpu"):
+        return tree.map_structure(batch_fn, *values)
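Hedged usage sketch for the new helper (module path taken from the RECORD entry below, `keras/src/utils/grain_utils.py`); `make_batch` stacks a list of per-sample structures into batched arrays on CPU:

```python
import numpy as np

from keras.src.utils.grain_utils import make_batch

samples = [
    (np.ones((2, 2), "float32"), np.int32(0)),
    (np.zeros((2, 2), "float32"), np.int32(1)),
]
images, labels = make_batch(samples)
print(images.shape, labels.shape)  # (2, 2, 2) (2,)
```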
keras/src/utils/image_dataset_utils.py CHANGED
@@ -1,11 +1,27 @@
+import io
+import pathlib
+
 import numpy as np
 
 from keras.src.api_export import keras_export
 from keras.src.backend.config import standardize_data_format
 from keras.src.utils import dataset_utils
 from keras.src.utils import image_utils
+from keras.src.utils.grain_utils import make_batch
+from keras.src.utils.module_utils import grain
 from keras.src.utils.module_utils import tensorflow as tf
 
+try:
+    from PIL import Image as pil_image
+
+    try:
+        pil_image_resampling = pil_image.Resampling
+    except AttributeError:
+        pil_image_resampling = pil_image
+except ImportError:
+    pil_image = None
+    pil_image_resampling = None
+
 ALLOWLIST_FORMATS = (".bmp", ".gif", ".jpeg", ".jpg", ".png")
 
 
@@ -32,9 +48,10 @@ def image_dataset_from_directory(
     crop_to_aspect_ratio=False,
     pad_to_aspect_ratio=False,
     data_format=None,
+    format="tf",
     verbose=True,
 ):
-    """Generates a `tf.data.Dataset` from image files in a directory.
+    """Generates a dataset from image files in a directory.
 
     If your directory structure is:
 
@@ -49,13 +66,17 @@ def image_dataset_from_directory(
     ```
 
     Then calling `image_dataset_from_directory(main_directory,
-    labels='inferred')` will return a `tf.data.Dataset` that yields batches of
+    labels='inferred')` will return a dataset that yields batches of
     images from the subdirectories `class_a` and `class_b`, together with labels
     0 and 1 (0 corresponding to `class_a` and 1 corresponding to `class_b`).
 
     Supported image formats: `.jpeg`, `.jpg`, `.png`, `.bmp`, `.gif`.
     Animated gifs are truncated to the first frame.
 
+    By default, this function will return a `tf.data.Dataset` object. You can
+    set `format="grain"` to return a `grain.IterDataset` object instead, which
+    removes the TensorFlow dependency.
+
     Args:
         directory: Directory where the data is located.
             If `labels` is `"inferred"`, it should contain
@@ -125,12 +146,19 @@ def image_dataset_from_directory(
            preserved.
        data_format: If None uses keras.config.image_data_format()
            otherwise either 'channel_last' or 'channel_first'.
+        format: The format of the return object. Defaults to `"tf"`. Available
+            options are:
+            - `"tf"`: returns a `tf.data.Dataset` object. Requires
+                TensorFlow to be installed.
+            - `"grain"`: returns a `grain.IterDataset` object. Requires
+                Grain to be installed.
        verbose: Whether to display number information on classes and
            number of files found. Defaults to `True`.
 
    Returns:
 
-    A `tf.data.Dataset` object.
+    A `tf.data.Dataset` (`format="tf"`) or `grain.IterDataset`
+    (`format="grain"`) object.
 
    - If `label_mode` is `None`, it yields `float32` tensors of shape
      `(batch_size, image_size[0], image_size[1], num_channels)`,
@@ -222,6 +250,11 @@ def image_dataset_from_directory(
             f"{supported_interpolations}. "
             f"Received: interpolation={interpolation}"
         )
+    if format not in ("tf", "grain"):
+        raise ValueError(
+            '`format` should be either "tf" or "grain". '
+            f"Received: format={format}"
+        )
 
     dataset_utils.check_validation_split_arg(
         validation_split, subset, shuffle, seed
@@ -289,6 +322,7 @@ def image_dataset_from_directory(
             shuffle=shuffle,
             shuffle_buffer_size=shuffle_buffer_size,
             seed=seed,
+            format=format,
         )
 
         val_dataset = paths_and_labels_to_dataset(
@@ -303,14 +337,23 @@ def image_dataset_from_directory(
             pad_to_aspect_ratio=pad_to_aspect_ratio,
             data_format=data_format,
             shuffle=False,
+            format=format,
         )
 
-        if batch_size is not None:
-            train_dataset = train_dataset.batch(batch_size)
-            val_dataset = val_dataset.batch(batch_size)
-
-        train_dataset = train_dataset.prefetch(tf.data.AUTOTUNE)
-        val_dataset = val_dataset.prefetch(tf.data.AUTOTUNE)
+        if format == "tf":
+            if batch_size is not None:
+                train_dataset = train_dataset.batch(batch_size)
+                val_dataset = val_dataset.batch(batch_size)
+            train_dataset = train_dataset.prefetch(tf.data.AUTOTUNE)
+            val_dataset = val_dataset.prefetch(tf.data.AUTOTUNE)
+        else:
+            train_dataset = train_dataset.to_iter_dataset()
+            val_dataset = val_dataset.to_iter_dataset()
+            if batch_size is not None:
+                train_dataset = train_dataset.batch(
+                    batch_size, batch_fn=make_batch
+                )
+                val_dataset = val_dataset.batch(batch_size, batch_fn=make_batch)
 
         # Users may need to reference `class_names`.
         train_dataset.class_names = class_names
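Hedged usage sketch of the new `format` argument (the directory name and sizes are placeholders; Grain must be installed for `format="grain"`):

```python
import keras

ds = keras.utils.image_dataset_from_directory(
    "path/to/images",   # class subfolders: class_a/, class_b/, ...
    image_size=(64, 64),
    batch_size=8,
    format="grain",     # grain.IterDataset instead of tf.data.Dataset
)
for images, labels in ds:
    print(images.shape, labels.shape)  # e.g. (8, 64, 64, 3) (8,)
    break
```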
@@ -345,12 +388,18 @@ def image_dataset_from_directory(
         shuffle=shuffle,
         shuffle_buffer_size=shuffle_buffer_size,
         seed=seed,
+        format=format,
     )
 
-    if batch_size is not None:
-        dataset = dataset.batch(batch_size)
+    if format == "tf":
+        if batch_size is not None:
+            dataset = dataset.batch(batch_size)
+        dataset = dataset.prefetch(tf.data.AUTOTUNE)
+    else:
+        dataset = dataset.to_iter_dataset()
+        if batch_size is not None:
+            dataset = dataset.batch(batch_size, batch_fn=make_batch)
 
-    dataset = dataset.prefetch(tf.data.AUTOTUNE)
     # Users may need to reference `class_names`.
     dataset.class_names = class_names
 
@@ -374,11 +423,66 @@ def paths_and_labels_to_dataset(
     shuffle=False,
     shuffle_buffer_size=None,
     seed=None,
+    format="tf",
+):
+    """Constructs a dataset of images and labels."""
+    if format == "tf":
+        return _paths_and_labels_to_dataset_tf(
+            image_paths=image_paths,
+            image_size=image_size,
+            num_channels=num_channels,
+            labels=labels,
+            label_mode=label_mode,
+            num_classes=num_classes,
+            interpolation=interpolation,
+            data_format=data_format,
+            crop_to_aspect_ratio=crop_to_aspect_ratio,
+            pad_to_aspect_ratio=pad_to_aspect_ratio,
+            shuffle=shuffle,
+            shuffle_buffer_size=shuffle_buffer_size,
+            seed=seed,
+        )
+    elif format == "grain":
+        return _paths_and_labels_to_dataset_grain(
+            image_paths=image_paths,
+            image_size=image_size,
+            num_channels=num_channels,
+            labels=labels,
+            label_mode=label_mode,
+            num_classes=num_classes,
+            interpolation=interpolation,
+            data_format=data_format,
+            crop_to_aspect_ratio=crop_to_aspect_ratio,
+            pad_to_aspect_ratio=pad_to_aspect_ratio,
+            shuffle=shuffle,
+            seed=seed,
+        )
+    else:
+        raise ValueError(
+            '`format` should be either "tf" or "grain". '
+            f"Received: format={format}"
+        )
+
+
+def _paths_and_labels_to_dataset_tf(
+    image_paths,
+    image_size,
+    num_channels,
+    labels,
+    label_mode,
+    num_classes,
+    interpolation,
+    data_format,
+    crop_to_aspect_ratio=False,
+    pad_to_aspect_ratio=False,
+    shuffle=False,
+    shuffle_buffer_size=None,
+    seed=None,
 ):
     """Constructs a dataset of images and labels."""
     path_ds = tf.data.Dataset.from_tensor_slices(image_paths)
     if label_mode:
-        label_ds = dataset_utils.labels_to_dataset(
+        label_ds = dataset_utils.labels_to_dataset_tf(
             labels, label_mode, num_classes
         )
         ds = tf.data.Dataset.zip((path_ds, label_ds))
@@ -398,17 +502,18 @@ def paths_and_labels_to_dataset(
     )
     if label_mode:
         ds = ds.map(
-            lambda x, y: (load_image(x, *args), y),
+            lambda x, y: (_load_image_tf(x, *args), y),
             num_parallel_calls=tf.data.AUTOTUNE,
         )
     else:
         ds = ds.map(
-            lambda x: load_image(x, *args), num_parallel_calls=tf.data.AUTOTUNE
+            lambda x: _load_image_tf(x, *args),
+            num_parallel_calls=tf.data.AUTOTUNE,
         )
     return ds
 
 
-def load_image(
+def _load_image_tf(
     path,
     image_size,
     num_channels,
@@ -457,3 +562,120 @@ def load_image(
     else:
         img.set_shape((num_channels, image_size[0], image_size[1]))
     return img
+
+
+def _paths_and_labels_to_dataset_grain(
+    image_paths,
+    image_size,
+    num_channels,
+    labels,
+    label_mode,
+    num_classes,
+    interpolation,
+    data_format,
+    crop_to_aspect_ratio=False,
+    pad_to_aspect_ratio=False,
+    shuffle=False,
+    seed=None,
+):
+    """Constructs a dataset of images and labels."""
+    path_ds = grain.MapDataset.source(image_paths)
+    if label_mode:
+        label_ds = dataset_utils.labels_to_dataset_grain(
+            labels, label_mode, num_classes
+        )
+        ds = grain.experimental.ZipMapDataset([path_ds, label_ds])
+    else:
+        ds = path_ds
+
+    if shuffle:
+        ds = ds.shuffle(seed=seed)
+
+    args = (
+        image_size,
+        num_channels,
+        interpolation,
+        data_format,
+        crop_to_aspect_ratio,
+        pad_to_aspect_ratio,
+    )
+    if label_mode:
+        ds = ds.map(lambda data: (_load_image_grain(data[0], *args), data[1]))
+    else:
+        ds = ds.map(lambda x: _load_image_grain(x, *args))
+
+    return ds
+
+
+def _load_image_grain(
+    path,
+    image_size,
+    num_channels,
+    interpolation,
+    data_format,
+    crop_to_aspect_ratio=False,
+    pad_to_aspect_ratio=False,
+):
+    """Load an image from a path and resize it."""
+    from keras.src import backend
+    from keras.src import ops
+
+    if pil_image is None:
+        raise ImportError(
+            "Could not import PIL.Image. The use of `load_img` requires PIL."
+        )
+    if pad_to_aspect_ratio and crop_to_aspect_ratio:
+        raise ValueError(
+            "Only one of `pad_to_aspect_ratio`, `crop_to_aspect_ratio`"
+            " can be set to `True`."
+        )
+
+    if isinstance(path, io.BytesIO):
+        img = pil_image.open(path)
+    elif isinstance(path, (pathlib.Path, bytes, str)):
+        if isinstance(path, pathlib.Path):
+            path = str(path.resolve())
+        img = pil_image.open(path)
+    else:
+        raise TypeError(
+            f"path should be path-like or io.BytesIO, not {type(path)}"
+        )
+    if num_channels == 1:
+        # if image is not already an 8-bit, 16-bit or 32-bit grayscale image
+        # convert it to an 8-bit grayscale image.
+        if img.mode not in ("L", "I;16", "I"):
+            img = img.convert("L")
+    elif num_channels == 4:
+        if img.mode != "RGBA":
+            img = img.convert("RGBA")
+    elif num_channels == 3:
+        if img.mode != "RGB":
+            img = img.convert("RGB")
+    else:
+        raise ValueError(
+            "num_channels must be 1, 3 or 4. "
+            f"Received: num_channels={num_channels}"
+        )
+
+    with backend.device_scope("cpu"):
+        img = ops.convert_to_tensor(np.array(img), dtype="float32")
+        if len(img.shape) == 2:
+            # If the image is grayscale, expand dims to add channel axis.
+            # The reason is that `ops.image.resize` expects 3D or 4D tensors.
+            img = ops.expand_dims(img, axis=-1)
+        if data_format == "channels_first":
+            img = ops.transpose(img, (2, 0, 1))
+        img = ops.image.resize(
+            img,
+            size=image_size,
+            interpolation=interpolation,
+            crop_to_aspect_ratio=crop_to_aspect_ratio,
+            pad_to_aspect_ratio=pad_to_aspect_ratio,
+            data_format=data_format,
+        )
+    if backend.backend() == "tensorflow":
+        if data_format == "channels_last":
+            img.set_shape((image_size[0], image_size[1], num_channels))
+        else:
+            img.set_shape((num_channels, image_size[0], image_size[1]))
+    return img
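A minimal sketch of the `num_channels` → PIL mode normalization used by `_load_image_grain` above, assuming Pillow is installed:

```python
from PIL import Image

def normalize_mode(img, num_channels):
    # 1 channel -> 8/16/32-bit grayscale, 3 -> RGB, 4 -> RGBA.
    if num_channels == 1 and img.mode not in ("L", "I;16", "I"):
        return img.convert("L")
    if num_channels == 3 and img.mode != "RGB":
        return img.convert("RGB")
    if num_channels == 4 and img.mode != "RGBA":
        return img.convert("RGBA")
    return img

print(normalize_mode(Image.new("RGB", (8, 8)), 1).mode)  # "L"
```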
keras/src/utils/module_utils.py CHANGED
@@ -58,3 +58,4 @@ torch_xla = LazyModule(
 optree = LazyModule("optree")
 dmtree = LazyModule("tree")
 tf2onnx = LazyModule("tf2onnx")
+grain = LazyModule("grain")
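Hedged illustration of what the `LazyModule` entry enables: importing Keras does not import Grain; the real `import grain` only happens on first attribute access (and surfaces a helpful error if Grain is missing). Assumes Grain is installed for the call below:

```python
from keras.src.utils.module_utils import grain

ds = grain.MapDataset.source([1, 2, 3])  # first attribute access triggers the import
print(list(ds))  # [1, 2, 3]
```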
keras/src/utils/text_dataset_utils.py CHANGED
@@ -2,6 +2,8 @@ import numpy as np
 
 from keras.src.api_export import keras_export
 from keras.src.utils import dataset_utils
+from keras.src.utils.grain_utils import make_string_batch
+from keras.src.utils.module_utils import grain
 from keras.src.utils.module_utils import tensorflow as tf
 
 
@@ -23,9 +25,10 @@ def text_dataset_from_directory(
     validation_split=None,
     subset=None,
     follow_links=False,
+    format="tf",
     verbose=True,
 ):
-    """Generates a `tf.data.Dataset` from text files in a directory.
+    """Generates a dataset from text files in a directory.
 
     If your directory structure is:
 
@@ -40,12 +43,16 @@ def text_dataset_from_directory(
     ```
 
     Then calling `text_dataset_from_directory(main_directory,
-    labels='inferred')` will return a `tf.data.Dataset` that yields batches of
+    labels='inferred')` will return a dataset that yields batches of
     texts from the subdirectories `class_a` and `class_b`, together with labels
     0 and 1 (0 corresponding to `class_a` and 1 corresponding to `class_b`).
 
     Only `.txt` files are supported at this time.
 
+    By default, this function will return a `tf.data.Dataset` object. You can
+    set `format="grain"` to return a `grain.IterDataset` object instead, which
+    removes the TensorFlow dependency.
+
     Args:
         directory: Directory where the data is located.
             If `labels` is `"inferred"`, it should contain
@@ -91,19 +98,34 @@ def text_dataset_from_directory(
            (the training and validation datasets respectively).
        follow_links: Whether to visits subdirectories pointed to by symlinks.
            Defaults to `False`.
+        format: The format of the return object. Defaults to `"tf"`. Available
+            options are:
+            - `"tf"`: returns a `tf.data.Dataset` object. Requires
+                TensorFlow to be installed.
+            - `"grain"`: returns a `grain.IterDataset` object. Requires
+                Grain to be installed.
        verbose: Whether to display number information on classes and
            number of files found. Defaults to `True`.
 
    Returns:
 
-    A `tf.data.Dataset` object.
+    A `tf.data.Dataset` (`format="tf"`) or `grain.IterDataset`
+    (`format="grain"`) object.
 
+    When `format="tf"`:
    - If `label_mode` is `None`, it yields `string` tensors of shape
      `(batch_size,)`, containing the contents of a batch of text files.
    - Otherwise, it yields a tuple `(texts, labels)`, where `texts`
      has shape `(batch_size,)` and `labels` follows the format described
      below.
 
+    When `format="grain"`:
+    - If `label_mode` is `None`, it yields a list of Python strings containing
+      the contents of a batch of text files.
+    - Otherwise, it yields a tuple `(texts, labels)`, where `texts`
+      is a list of Python strings and `labels` follows the format described
+      below.
+
    Rules regarding labels format:
 
    - if `label_mode` is `int`, the labels are an `int32` tensor of shape
@@ -137,6 +159,11 @@ def text_dataset_from_directory(
             '"categorical", "binary", '
             f"or None. Received: label_mode={label_mode}"
         )
+    if format not in ("tf", "grain"):
+        raise ValueError(
+            '`format` should be either "tf" or "grain". '
+            f"Received: format={format}"
+        )
     if labels is None or label_mode is None:
         labels = None
         label_mode = None
@@ -199,6 +226,7 @@ def text_dataset_from_directory(
             shuffle=shuffle,
             shuffle_buffer_size=shuffle_buffer_size,
             seed=seed,
+            format=format,
         )
         val_dataset = paths_and_labels_to_dataset(
             file_paths=file_paths_val,
@@ -207,14 +235,25 @@ def text_dataset_from_directory(
             num_classes=len(class_names) if class_names else 0,
             max_length=max_length,
             shuffle=False,
+            format=format,
         )
 
-        if batch_size is not None:
-            train_dataset = train_dataset.batch(batch_size)
-            val_dataset = val_dataset.batch(batch_size)
-
-        train_dataset = train_dataset.prefetch(tf.data.AUTOTUNE)
-        val_dataset = val_dataset.prefetch(tf.data.AUTOTUNE)
+        if format == "tf":
+            if batch_size is not None:
+                train_dataset = train_dataset.batch(batch_size)
+                val_dataset = val_dataset.batch(batch_size)
+            train_dataset = train_dataset.prefetch(tf.data.AUTOTUNE)
+            val_dataset = val_dataset.prefetch(tf.data.AUTOTUNE)
+        else:
+            train_dataset = train_dataset.to_iter_dataset()
+            val_dataset = val_dataset.to_iter_dataset()
+            if batch_size is not None:
+                train_dataset = train_dataset.batch(
+                    batch_size, batch_fn=make_string_batch
+                )
+                val_dataset = val_dataset.batch(
+                    batch_size, batch_fn=make_string_batch
+                )
 
         # Users may need to reference `class_names`.
         train_dataset.class_names = class_names
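Hedged usage sketch of `format="grain"` for text directories (the directory name is a placeholder; Grain must be installed). On non-TensorFlow backends, each batch of texts is a plain list of Python strings:

```python
import keras

ds = keras.utils.text_dataset_from_directory(
    "path/to/texts",    # class subfolders containing .txt files
    batch_size=4,
    format="grain",
)
for texts, labels in ds:
    print(len(texts), labels.shape)  # e.g. 4 (4,)
    break
```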
@@ -238,10 +277,17 @@ def text_dataset_from_directory(
         shuffle=shuffle,
         shuffle_buffer_size=shuffle_buffer_size,
         seed=seed,
+        format=format,
     )
-    if batch_size is not None:
-        dataset = dataset.batch(batch_size)
-    dataset = dataset.prefetch(tf.data.AUTOTUNE)
+
+    if format == "tf":
+        if batch_size is not None:
+            dataset = dataset.batch(batch_size)
+        dataset = dataset.prefetch(tf.data.AUTOTUNE)
+    else:
+        dataset = dataset.to_iter_dataset()
+        if batch_size is not None:
+            dataset = dataset.batch(batch_size, batch_fn=make_string_batch)
 
     # Users may need to reference `class_names`.
     dataset.class_names = class_names
@@ -257,11 +303,47 @@ def paths_and_labels_to_dataset(
     shuffle=False,
     shuffle_buffer_size=None,
     seed=None,
+    format="tf",
+):
+    """Constructs a dataset of text strings and labels."""
+    if format == "tf":
+        return _paths_and_labels_to_dataset_tf(
+            file_paths,
+            labels,
+            label_mode,
+            num_classes,
+            max_length,
+            shuffle,
+            shuffle_buffer_size,
+            seed,
+        )
+    elif format == "grain":
+        return _paths_and_labels_to_dataset_grain(
+            file_paths,
+            labels,
+            label_mode,
+            num_classes,
+            max_length,
+            shuffle,
+            shuffle_buffer_size,
+            seed,
+        )
+
+
+def _paths_and_labels_to_dataset_tf(
+    file_paths,
+    labels,
+    label_mode,
+    num_classes,
+    max_length,
+    shuffle=False,
+    shuffle_buffer_size=None,
+    seed=None,
 ):
     """Constructs a dataset of text strings and labels."""
     path_ds = tf.data.Dataset.from_tensor_slices(file_paths)
     if label_mode:
-        label_ds = dataset_utils.labels_to_dataset(
+        label_ds = dataset_utils.labels_to_dataset_tf(
             labels, label_mode, num_classes
         )
         ds = tf.data.Dataset.zip((path_ds, label_ds))
@@ -273,19 +355,62 @@ def paths_and_labels_to_dataset(
 
     if label_mode:
         ds = ds.map(
-            lambda x, y: (path_to_string_content(x, max_length), y),
+            lambda x, y: (_path_to_string_content_tf(x, max_length), y),
             num_parallel_calls=tf.data.AUTOTUNE,
         )
     else:
         ds = ds.map(
-            lambda x: path_to_string_content(x, max_length),
+            lambda x: _path_to_string_content_tf(x, max_length),
             num_parallel_calls=tf.data.AUTOTUNE,
         )
     return ds
 
 
-def path_to_string_content(path, max_length):
+def _path_to_string_content_tf(path, max_length):
     txt = tf.io.read_file(path)
     if max_length is not None:
         txt = tf.strings.substr(txt, 0, max_length)
     return txt
+
+
+def _paths_and_labels_to_dataset_grain(
+    file_paths,
+    labels,
+    label_mode,
+    num_classes,
+    max_length,
+    shuffle=False,
+    shuffle_buffer_size=None,
+    seed=None,
+):
+    """Constructs a dataset of text strings and labels."""
+    path_ds = grain.MapDataset.source(file_paths)
+    if label_mode:
+        label_ds = dataset_utils.labels_to_dataset_grain(
+            labels, label_mode, num_classes
+        )
+        ds = grain.experimental.ZipMapDataset([path_ds, label_ds])
+    else:
+        ds = path_ds
+
+    if shuffle:
+        ds = ds.shuffle(seed=seed)
+
+    if label_mode:
+        ds = ds.map(
+            lambda data: (
+                _path_to_string_content_grain(data[0], max_length),
+                data[1],
+            ),
+        )
+    else:
+        ds = ds.map(lambda x: _path_to_string_content_grain(x, max_length))
+    return ds
+
+
+def _path_to_string_content_grain(path, max_length):
+    with open(path, "r") as f:
+        txt = f.read()
+    if max_length is not None:
+        txt = txt[:max_length]
+    return txt
keras/src/version.py CHANGED
@@ -1,7 +1,7 @@
 from keras.src.api_export import keras_export
 
 # Unique source of truth for the version number.
-__version__ = "3.12.0.dev2025081804"
+__version__ = "3.12.0.dev2025082003"
 
 
 @keras_export("keras.version")
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: keras-nightly
-Version: 3.12.0.dev2025081804
+Version: 3.12.0.dev2025082003
 Summary: Multi-backend Keras
 Author-email: Keras team <keras-users@googlegroups.com>
 License: Apache License 2.0
@@ -126,7 +126,7 @@ keras/regularizers/__init__.py,sha256=542Shphw7W8h4Dyf2rmqMKUECVZ8IVBvN9g1LWhz-b
 keras/saving/__init__.py,sha256=KvL2GZxjvgFgEhvEnkvqjIR9JSNHKz-NWZacXajsjLI,1298
 keras/src/__init__.py,sha256=Gi4S7EiCMkE03PbdGNpFdaUYySWDs_FcAJ8Taz9Y1BE,684
 keras/src/api_export.py,sha256=gXOkBOnmscV013WAc75lc4Up01-Kkg9EylIAT_QWctg,1173
-keras/src/version.py,sha256=AqccLKeItJzjB-pkWdwsDXqBu69XF8tGGJlj-42BrMA,204
+keras/src/version.py,sha256=njY7f4Il2CTFP8VpP-i-fu_W9TrUOijOX1eeYI-KMMA,204
 keras/src/activations/__init__.py,sha256=0nL3IFDB9unlrMz8ninKOWo-uCHasTUpTo1tXZb2u44,4433
 keras/src/activations/activations.py,sha256=mogPggtp4CGldI3VOPNmesRxp6EbiR1_i4KLGaVwzL8,17614
 keras/src/applications/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -169,7 +169,7 @@ keras/src/backend/jax/layer.py,sha256=QxZeeiimUulsb3j1h3ncNxIoTYdKPO89s0kP49ZwF-
 keras/src/backend/jax/linalg.py,sha256=dtGHRYCvoVlRX0UwbDDdunA8Vp_mA3sdqoasX4P8SbQ,2532
 keras/src/backend/jax/math.py,sha256=1IEDpdoF8e5ltu3D4wbDQuihzvJHhMXz8W9Z_E-eJqU,9391
 keras/src/backend/jax/nn.py,sha256=R0a8-WB0YCl14FpRi2CQ45MFRvHCFtPTedk0Q1LfWYc,45935
-keras/src/backend/jax/numpy.py,sha256=tNBmRx0elWIgzOGwx3k-voQ57_xYtXOrvbsGrjx8vpw,36572
+keras/src/backend/jax/numpy.py,sha256=ZUvPpqfwfDOw2n-zo156AhoWzuweCTs6g034A0tqpMs,36687
 keras/src/backend/jax/optimizer.py,sha256=JSKRkBteb7u-He5rtHwU6Wy5p8IjSsZf-IIL4-eQfsE,4102
 keras/src/backend/jax/random.py,sha256=Uk2huGIk_dlzMrx5eDVrrr2TeCEMitn2vr4yzA0NXjs,3594
 keras/src/backend/jax/rnn.py,sha256=Ycq0qfLY4M4jhltvztpLQyywjEM17T7CZQFh4hhHOUE,7767
@@ -189,7 +189,7 @@ keras/src/backend/numpy/random.py,sha256=wx2nE75q7L2cBMjtQlQx8yKMj4Ie3puFMDQsbrZ
 keras/src/backend/numpy/rnn.py,sha256=thOsMung1qR3lQsR4_D6hqKMFollQgrB0KwsJLk4BMY,7867
 keras/src/backend/numpy/trainer.py,sha256=MzWr8_LLHa1P6fxdUWirGw_lQwHGF_vkZ7RUGLUzjUs,11126
 keras/src/backend/openvino/__init__.py,sha256=gltfAbi9LMAAalH1fRIRWS1LRjf5EreWqOMtXqlliwY,1323
-keras/src/backend/openvino/core.py,sha256=kg7afXa_0k2UlbZ_8O9ohr53kPfB3R5FzeaT2DirI6w,41466
+keras/src/backend/openvino/core.py,sha256=v5lk59tssLzF-r6gOLd9HNk1Fz1FKJ-o2jdSEDUGX-M,41913
 keras/src/backend/openvino/export.py,sha256=eDDZmCTXIyii3YXEPMEDXYVUI_z07BlHJaD0NovEoXE,360
 keras/src/backend/openvino/image.py,sha256=2X7bSb9kMTKkSKKtZ3CW4pOlDo4H2vGyGpWDQUN3tWY,1903
 keras/src/backend/openvino/layer.py,sha256=5RdvaH1yOyPAphjKiuQAK1H_yZFYKE1Hp7c5bZ1pkRk,30
@@ -472,7 +472,7 @@ keras/src/metrics/iou_metrics.py,sha256=JRN9h5PquDfY-OPkqo1cFLLT0oiSYbbV7J54PxuO
 keras/src/metrics/metric.py,sha256=1PX_RDtB9PyLGxeUFenPoeVJphFIsNkD_BxFgjl7jvk,8759
 keras/src/metrics/metrics_utils.py,sha256=YEK52B_liCGFM_VFsGGb-fpNxVsGR4VZjMzfNGP2wPY,26725
 keras/src/metrics/probabilistic_metrics.py,sha256=cyDuxohv3eqbVjGhTljwo507wzriuXG20OVsCXd0Fo8,10640
-keras/src/metrics/reduction_metrics.py,sha256=-imgCBWg9Kdfx_k4Shq81h07feoHDquB_J704NgFQ1g,7345
+keras/src/metrics/reduction_metrics.py,sha256=dWGDRZhts3sPNvnVlVaHsDf2HxSz4S3NiWeGc5XS73M,7274
 keras/src/metrics/regression_metrics.py,sha256=eLacV_8CKtzA26BJDJuncUDATuL1x8O6SRHqLA9eSFc,19756
 keras/src/models/__init__.py,sha256=DPbBPSfIGgsufTfJH5U5xJOeN_Ef4FMadT7KKYg3Kjg,143
 keras/src/models/cloning.py,sha256=jwVtVVVYVasFIrln2hrzJ8bR2Xhsx9wYgEqpF1AjSvE,15786
@@ -481,7 +481,7 @@ keras/src/models/model.py,sha256=zsIZUIsXbl_PpMX7GaNOPiT-QeHDZvPIoCteN7Ee2Bs,354
 keras/src/models/sequential.py,sha256=CC9Q1BNB9m7TkgMHRyjOzhQvneng576wJpmdgHrACKY,14352
 keras/src/models/variable_mapping.py,sha256=FVtcgjBRqOxtvkzOE6kjG9SpcB9keDg2gS5LOTlXvG0,2181
 keras/src/ops/__init__.py,sha256=aORlvnrqY_eQl0EFLWdpHsXHnQ6JLSw1qhwJMr-VXJ0,644
-keras/src/ops/core.py,sha256=OjawYtGqKRZem0gW6r6gjsZtKO3Oa_iwHRmXX3yEJJU,42370
+keras/src/ops/core.py,sha256=t06-MvptYb6ZVwmNj083JyUtzU4M6UTVXOT2vVHtKyU,42781
 keras/src/ops/einops.py,sha256=-pxW0_AzDQNsR7t2TJrzvYXBJpmLYA3fJoO0U_U96PY,6268
 keras/src/ops/function.py,sha256=wFt_CZ5_yi9jcLjk5yzyHpXKhreeXo3P8zDFQ9P8yA8,17832
 keras/src/ops/image.py,sha256=hx1-EEYPiRygVtXqMIQg-8g0iPEu0g9MnSqN5Nr1_Yk,60777
@@ -549,20 +549,21 @@ keras/src/tree/optree_impl.py,sha256=ZLEbRqE6u76miYgiRQc3eRc82_-xMxIZd3wBNQHeJds
 keras/src/tree/tree_api.py,sha256=cSOp6EMOe8p0DUIbbvELrzIjABTIYX0Fw7CBfqi8pcY,14093
 keras/src/utils/__init__.py,sha256=WSmTldk6M-XV0X84XR5vryg0BTR8KsTfxNIyRaNkqq0,1423
 keras/src/utils/argument_validation.py,sha256=uRFoLNJu3L2J8CM8L7uXGqhYi7ji8whh0H8nSHuRUXg,2876
-keras/src/utils/audio_dataset_utils.py,sha256=pxg3jOHgZMFhEkuJmCjI-dcrFyv7OlHyWW-49eedKN0,15114
+keras/src/utils/audio_dataset_utils.py,sha256=oxZuYM2LEKtCIyJLYr3ifqlA-Gmr4uzQynPOm6Eh3sU,15117
 keras/src/utils/backend_utils.py,sha256=U3oXPiuuWBjXs0uFBLf5vpyBYE44Mja-5A1YFxRuzlM,5278
 keras/src/utils/code_stats.py,sha256=1h4ifpAH5Jezm8BVrKM_WyzcG9uxrUiyzP1kcS4uqlo,1442
 keras/src/utils/config.py,sha256=3VhENVcng0DeazR-5rvjSnW_sovvOw-skEP-t3xWCEY,4643
-keras/src/utils/dataset_utils.py,sha256=k-g4akwOZyXHIq2DNlN8DwWCYSMYWZIYMdQJ3PwZ1IU,28194
+keras/src/utils/dataset_utils.py,sha256=JQJvGMrt69hwJO3eP-FR9oTw7HvCdBoZb0sx_kdEJws,30775
 keras/src/utils/dtype_utils.py,sha256=wL_WaWYoDzDDmQW6EQGdpBb9O5QJ9OaEJsvY0Mir4uc,1483
 keras/src/utils/file_utils.py,sha256=kylM3-5YZl9kwkYZIfBR0FIwFPRJjZnNbmhmx2RGNrY,17368
-keras/src/utils/image_dataset_utils.py,sha256=doL8q0q4DciFnlO-IyKN1v2Emh_gP4sI2rDhgeKL5qs,16964
+keras/src/utils/grain_utils.py,sha256=Wfwv12E3UrNZjJjTEk2JVV6_YEUav35UJ6bV1UAPEIk,886
+keras/src/utils/image_dataset_utils.py,sha256=0lOzD1CiXwZOe1wW-5uvFKuIgot9PWUC9KJJA0NVuP8,24017
 keras/src/utils/image_utils.py,sha256=HUI7Zcgqvsmm8a1xwfMwr7pOhnG4lsChP8Owv-xlCTM,16703
 keras/src/utils/io_utils.py,sha256=2u6b1SEQmkxo4IRPkKBedAkKuRIQMF4CdD-B5ko0Cek,4432
 keras/src/utils/jax_layer.py,sha256=h4MVRADUAL8t8pHaTKPYt81rxUuW6BfE4MnmQp5ETM0,27189
 keras/src/utils/jax_utils.py,sha256=vY3P4S9mfWEjdirLd81ocKqeCm-UVfgQ1yTi6UHdBiM,322
 keras/src/utils/model_visualization.py,sha256=I2NWeo-g0NpCM88HXMDyAbpvF_KIZuJr5hfOLGm922U,17799
-keras/src/utils/module_utils.py,sha256=ej6YM6I9dTapJvuhZsA7lvp1wxbKzbzI8T8abjLuxLA,1948
+keras/src/utils/module_utils.py,sha256=j3YV_MkoRffRXtJSEjbbrZ3JVdau657JEb4eNKVrmHg,1976
 keras/src/utils/naming.py,sha256=bPowKBlgiVP_6XtVlNVHxrxheKuJy2c0e-oEM8ocZQY,1776
 keras/src/utils/numerical_utils.py,sha256=Uqe5nu1HXmiZuh5-MznomtDSVSO9FgFaltdDtGnN61o,7205
 keras/src/utils/progbar.py,sha256=HWvgFustRG5WqsiIayaaSiUU2jOYkioEqbQdywmBm0c,10469
@@ -570,7 +571,7 @@ keras/src/utils/python_utils.py,sha256=j8d1oA6oEnU5J0xosWU3t9wIGiblj67OStEn7KJ7j
 keras/src/utils/rng_utils.py,sha256=NVk7Aavt8A1945YpBIGW18OPebo29g2qHgWZvkgRkW8,2168
 keras/src/utils/sequence_utils.py,sha256=CveyJ5VM5KJ4pFlo6LWT9omzd_xDeMRjTgczIKekP3Y,4716
 keras/src/utils/summary_utils.py,sha256=jjbTB6NTqMniSWXPKeNY6dvpn-U37WJdwqdfl8uX5nI,15447
-keras/src/utils/text_dataset_utils.py,sha256=JUqDauTec6uRZs71SbKeVjxHx_CNqqOWkoXQ1Q7ldRs,10701
+keras/src/utils/text_dataset_utils.py,sha256=6ACAHwEhjjd5rjfzwLl7Es2qkvmSBUWs5IYQLGrHFrQ,14543
 keras/src/utils/tf_utils.py,sha256=FTunWC5kdyjsK0TyxQxiHGaYNaAyUxhMX52Zee_Rz9c,4930
 keras/src/utils/timeseries_dataset_utils.py,sha256=rVxSuqlYLpzw_dVo8Ym5HSE2jFmndS8MAv4Uewycojo,9842
 keras/src/utils/torch_utils.py,sha256=y8vRSx_R9MtC1IpRqyK6LBRETxwrlUFnsu45R4_-7Hs,6638
@@ -592,7 +593,7 @@ keras/utils/bounding_boxes/__init__.py,sha256=jtvQll4u8ZY0Z96HwNhP1nxWEG9FM3gI-6
 keras/utils/legacy/__init__.py,sha256=oSYZz6uS8UxSElRaaJYWJEoweJ4GAasZjnn7fNaOlog,342
 keras/visualization/__init__.py,sha256=UKWmiy6sps4SWlmQi9WX8_Z53cPpLlphz2zIeHdwJpQ,722
 keras/wrappers/__init__.py,sha256=QkS-O5K8qGS7C3sytF8MpmO6PasATpNVGF8qtb7Ojsw,407
-keras_nightly-3.12.0.dev2025081804.dist-info/METADATA,sha256=0WzHzaCWMRbl5mrtjHaDa9jX9HWaEXOHlizpxLKc6fE,5970
-keras_nightly-3.12.0.dev2025081804.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-keras_nightly-3.12.0.dev2025081804.dist-info/top_level.txt,sha256=ptcw_-QuGZ4ZDjMdwi_Z0clZm8QAqFdvzzFnDEOTs9o,6
-keras_nightly-3.12.0.dev2025081804.dist-info/RECORD,,
+keras_nightly-3.12.0.dev2025082003.dist-info/METADATA,sha256=LZyWd4xCofCT4FX7BEdbxKxsT4K1qqybGB16Rj2U_II,5970
+keras_nightly-3.12.0.dev2025082003.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+keras_nightly-3.12.0.dev2025082003.dist-info/top_level.txt,sha256=ptcw_-QuGZ4ZDjMdwi_Z0clZm8QAqFdvzzFnDEOTs9o,6
+keras_nightly-3.12.0.dev2025082003.dist-info/RECORD,,