tf-keras-nightly 2.21.0.dev2025110310__py3-none-any.whl → 2.21.0.dev2025110610__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
tf_keras/__init__.py CHANGED
@@ -27,4 +27,4 @@ from tf_keras.src.engine.sequential import Sequential
  from tf_keras.src.engine.training import Model


- __version__ = "2.21.0.dev2025110310"
+ __version__ = "2.21.0.dev2025110610"
tf_keras/src/engine/data_adapter.py CHANGED
@@ -231,7 +231,7 @@ class TensorLikeDataAdapter(DataAdapter):
  return True
  return False

- return all(_is_tensor(v) for v in flat_inputs)
+ return all(_is_tensor(v) for v in flat_inputs if v is not None)

  def __init__(
  self,
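A note on the recurring `if v is not None` guards added throughout this adapter: `tf.nest.flatten` treats `None` as a leaf rather than dropping it, so a structure with an omitted optional input still yields `None` entries that every downstream check has to skip. A standalone illustration (the key names here are made up):

import numpy as np
import tensorflow as tf

x = {"context": None, "features": np.zeros((8, 3))}  # optional input left out
y = np.zeros((8, 1))

flat = tf.nest.flatten((x, y))
print(flat)  # [None, <8x3 array>, <8x1 array>] -- dict keys flatten in sorted order, None survives

# Mirrors the filtered set computation used by the adapters above.
num_samples = {int(t.shape[0]) for t in flat if t is not None}
print(num_samples)  # {8}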
@@ -259,7 +259,7 @@ class TensorLikeDataAdapter(DataAdapter):
  inputs = pack_x_y_sample_weight(x, y, sample_weights)

  num_samples = set(
- int(i.shape[0]) for i in tf.nest.flatten(inputs)
+ int(i.shape[0]) for i in tf.nest.flatten(inputs) if i is not None
  ).pop()
  _check_data_cardinality(inputs)

@@ -386,7 +386,7 @@ class TensorLikeDataAdapter(DataAdapter):

  def grab_batch(i, data):
  return tf.nest.map_structure(
- lambda d: tf.gather(d, i, axis=0), data
+ lambda d: tf.gather(d, i, axis=0) if d is not None else d, data
  )

  dataset = dataset.map(grab_batch, num_parallel_calls=tf.data.AUTOTUNE)
@@ -459,7 +459,7 @@ class GenericArrayLikeDataAdapter(TensorLikeDataAdapter):
  if not TensorLikeDataAdapter.can_handle(
  x, y
  ) and not CompositeTensorDataAdapter.can_handle(x, y):
- return all(_is_array_like(v) for v in flat_inputs)
+ return all(_is_array_like(v) for v in flat_inputs if v is not None)
  else:
  return False

@@ -496,7 +496,7 @@ class GenericArrayLikeDataAdapter(TensorLikeDataAdapter):
  shape[0] = None
  return tuple(shape)

- flat_dtypes = [inp.dtype for inp in flat_inputs]
+ flat_dtypes = [inp.dtype for inp in flat_inputs if inp is not None]
  contiguous = True
  if self._shuffle and self._shuffle != "batch":
  contiguous = False
@@ -509,15 +509,26 @@ class GenericArrayLikeDataAdapter(TensorLikeDataAdapter):
  # to a Tensor may force it into memory..
  def py_method(ind):
  def slice_array(data):
+ if data is None:
+ return None
  return training_utils.slice_arrays(
  data, ind.numpy(), contiguous=contiguous
  )

- return [slice_array(inp) for inp in flat_inputs]
+ return [
+ slice_array(inp) for inp in flat_inputs if inp is not None
+ ]

- flat_out = tf.py_function(py_method, [indices], flat_dtypes)
- for v, original_inp in zip(flat_out, flat_inputs):
- v.set_shape(dynamic_shape_like(original_inp))
+ results = tf.py_function(py_method, [indices], flat_dtypes)
+ results_it = iter(results)
+ flat_out = []
+ for original_inp in flat_inputs:
+ if original_inp is None:
+ flat_out.append(None)
+ else:
+ v = next(results_it)
+ v.set_shape(dynamic_shape_like(original_inp))
+ flat_out.append(v)
  return tf.nest.pack_sequence_as(inputs, flat_out)

  dataset = indices_dataset.map(
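The reshuffled block above works around the fact that `flat_dtypes` no longer contains entries for the `None` inputs, so `tf.py_function` returns only the real arrays; the loop then stitches the `None` placeholders back into their original positions before `tf.nest.pack_sequence_as` restores the structure. A standalone sketch of that filter-and-reinsert pattern (names are illustrative, not library code):

def apply_skipping_none(fn, items):
    """Apply fn to the non-None items only, keeping the None slots in place."""
    dense = [fn(x) for x in items if x is not None]  # analogous to what py_method returns
    dense_it = iter(dense)
    return [None if x is None else next(dense_it) for x in items]

print(apply_skipping_none(lambda v: v * 10, [1, None, 2, None, 3]))
# [10, None, 20, None, 30]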
@@ -608,8 +619,10 @@ class CompositeTensorDataAdapter(DataAdapter):
  return True
  return _is_composite(v)

- return any(_is_composite(v) for v in flat_inputs) and all(
- _is_tensor_or_composite(v) for v in flat_inputs
+ return any(
+ _is_composite(v) for v in flat_inputs if v is not None
+ ) and all(
+ _is_tensor_or_composite(v) for v in flat_inputs if v is not None
  )

  def __init__(
@@ -1944,14 +1957,18 @@ def single_batch_iterator(


  def _check_data_cardinality(data):
- num_samples = set(int(i.shape[0]) for i in tf.nest.flatten(data))
+ num_samples = set(
+ int(i.shape[0]) for i in tf.nest.flatten(data) if i is not None
+ )
  if len(num_samples) > 1:
  msg = "Data cardinality is ambiguous:\n"
  for label, single_data in zip(["x", "y", "sample_weight"], data):
  msg += " {} sizes: {}\n".format(
  label,
  ", ".join(
- str(i.shape[0]) for i in tf.nest.flatten(single_data)
+ str(i.shape[0])
+ for i in tf.nest.flatten(single_data)
+ if i is not None
  ),
  )
  msg += "Make sure all arrays contain the same number of samples."
tf_keras/src/engine/functional.py CHANGED
@@ -351,25 +351,45 @@ class Functional(training_lib.Model):
  if isinstance(self._nested_inputs, dict):
  # Case where `_nested_inputs` is a plain dict of Inputs.
  names = sorted(self._nested_inputs.keys())
- return [
- input_spec.InputSpec(
- shape=shape_with_no_batch_size(self._nested_inputs[name]),
- allow_last_axis_squeeze=True,
- name=name,
+ specs = []
+ for name in names:
+ layer = self._nested_inputs[name]._keras_history.layer
+ optional = (
+ layer.optional
+ if isinstance(layer, input_layer_module.InputLayer)
+ else False
  )
- for name in names
- ]
+ specs.append(
+ input_spec.InputSpec(
+ shape=shape_with_no_batch_size(
+ self._nested_inputs[name]
+ ),
+ allow_last_axis_squeeze=True,
+ name=name,
+ optional=optional,
+ )
+ )
+ return specs
  else:
  # Single input, or list / tuple of inputs.
  # The data may be passed as a dict keyed by input name.
- return [
- input_spec.InputSpec(
- shape=shape_with_no_batch_size(x),
- allow_last_axis_squeeze=True,
- name=x._keras_history.layer.name,
+ specs = []
+ for x in self.inputs:
+ layer = x._keras_history.layer
+ optional = (
+ layer.optional
+ if isinstance(layer, input_layer_module.InputLayer)
+ else False
  )
- for x in self.inputs
- ]
+ specs.append(
+ input_spec.InputSpec(
+ shape=shape_with_no_batch_size(x),
+ allow_last_axis_squeeze=True,
+ name=x._keras_history.layer.name,
+ optional=optional,
+ )
+ )
+ return specs

  @input_spec.setter
  def input_spec(self, value):
@@ -644,7 +664,8 @@ class Functional(training_lib.Model):
  else:
  masks = self._flatten_to_reference_inputs(mask)
  for input_t, mask in zip(inputs, masks):
- input_t._keras_mask = mask
+ if input_t is not None:
+ input_t._keras_mask = mask

  # Dictionary mapping reference tensors to computed tensors.
  tensor_dict = {}
tf_keras/src/engine/input_layer.py CHANGED
@@ -98,6 +98,8 @@ class InputLayer(base_layer.Layer):
  `tf.TypeSpec` represents the entire batch. When provided, all other
  args except name must be `None`.
  name: Optional name of the layer (string).
+ optional: Boolean, whether the input is optional or not. An optional
+ input can accept `None` values.
  """

  @traceback_utils.filter_traceback
@@ -111,6 +113,7 @@ class InputLayer(base_layer.Layer):
  name=None,
  ragged=None,
  type_spec=None,
+ optional=False,
  **kwargs,
  ):
  self._init_input_shape = input_shape
@@ -180,6 +183,7 @@ class InputLayer(base_layer.Layer):
  self.ragged = True if ragged else False
  self.batch_size = batch_size
  self.supports_masking = True
+ self.optional = optional

  if isinstance(input_shape, tf.TensorShape):
  input_shape = tuple(input_shape.as_list())
@@ -284,6 +288,7 @@ class InputLayer(base_layer.Layer):
  "sparse": self.sparse,
  "ragged": self.ragged,
  "name": self.name,
+ "optional": self.optional,
  }
  return config

@@ -303,6 +308,7 @@ def Input(
  tensor=None,
  ragged=None,
  type_spec=None,
+ optional=False,
  **kwargs,
  ):
  """`Input()` is used to instantiate a TF-Keras tensor.
@@ -341,6 +347,8 @@ def Input(
  [this guide](https://www.tensorflow.org/guide/ragged_tensor).
  type_spec: A `tf.TypeSpec` object to create the input placeholder from.
  When provided, all other args except name must be None.
+ optional: Boolean, whether the input is optional or not. An optional
+ input can accept `None` values.
  **kwargs: deprecated arguments support. Supports `batch_shape` and
  `batch_input_shape`.

@@ -415,6 +423,7 @@ def Input(
  "ragged": ragged,
  "input_tensor": tensor,
  "type_spec": type_spec,
+ "optional": optional,
  }

  batch_input_shape = kwargs.pop(
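Taken together, the input_layer.py hunks add an `optional` keyword to `Input()`/`InputLayer`, store it on the layer, and serialize it via `get_config()`; the functional.py hunk above then propagates the flag into the model's `InputSpec` objects. A minimal sketch against this nightly (the layer and names are illustrative, and a downstream layer still has to handle a missing input itself):

import tensorflow as tf
import tf_keras as keras

features = keras.Input(shape=(8,), name="features")
context = keras.Input(shape=(4,), name="context", optional=True)  # new kwarg

class ConcatIfPresent(keras.layers.Layer):
    """Concatenates the optional branch only when it is actually provided."""
    def call(self, inputs):
        a, b = inputs
        return a if b is None else tf.concat([a, b], axis=-1)

outputs = keras.layers.Dense(2)(ConcatIfPresent()([features, context]))
model = keras.Model({"features": features, "context": context}, outputs)

# The flag round-trips through the layer config and shows up on the model's specs.
print(context._keras_history.layer.optional)                  # True
print(context._keras_history.layer.get_config()["optional"])  # True
print([spec.optional for spec in model.input_spec])           # one True, for "context"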
tf_keras/src/engine/input_spec.py CHANGED
@@ -56,6 +56,8 @@ class InputSpec:
  as long as the last axis of the spec is 1.
  name: Expected key corresponding to this input when passing data as
  a dictionary.
+ optional: Boolean, whether the input is optional or not. An optional input
+ can accept `None` values.

  Example:

@@ -82,6 +84,7 @@ class InputSpec:
  axes=None,
  allow_last_axis_squeeze=False,
  name=None,
+ optional=False,
  ):
  self.dtype = tf.as_dtype(dtype).name if dtype is not None else None
  shape = tf.TensorShape(shape)
@@ -99,6 +102,7 @@ class InputSpec:
  self.min_ndim = min_ndim
  self.name = name
  self.allow_last_axis_squeeze = allow_last_axis_squeeze
+ self.optional = optional
  try:
  axes = axes or {}
  self.axes = {int(k): axes[k] for k in axes}
@@ -204,7 +208,11 @@ def assert_input_compatibility(input_spec, inputs, layer_name):
  inputs = list_inputs

  inputs = tf.nest.flatten(inputs)
- for x in inputs:
+ for _, (x, spec) in enumerate(zip(inputs, input_spec)):
+ if spec is None:
+ continue
+ if x is None and spec.optional:
+ continue
  # Having a shape/dtype is the only commonality of the various
  # tensor-like objects that may be passed. The most common kind of
  # invalid type we are guarding for is a Layer instance (Functional API),
@@ -224,6 +232,8 @@ def assert_input_compatibility(input_spec, inputs, layer_name):
  for input_index, (x, spec) in enumerate(zip(inputs, input_spec)):
  if spec is None:
  continue
+ if x is None and spec.optional:
+ continue

  shape = tf.TensorShape(x.shape)
  if shape.rank is None:
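With the two `continue` guards above, a `None` input paired with an `optional=True` spec is exempted from both the type check and the shape/dtype checks. A sketch using the internal helper directly, purely to illustrate the new behavior (module path as it appears in this wheel; the layer name is made up):

import tensorflow as tf
from tf_keras.src.engine import input_spec as input_spec_lib

specs = [
    input_spec_lib.InputSpec(ndim=2),
    input_spec_lib.InputSpec(ndim=2, optional=True),  # new flag
]
inputs = [tf.zeros((3, 4)), None]  # the optional input is simply missing

# Previously the first loop assumed every input had a shape and raised a
# TypeError on the None entry; with the guards above it is skipped.
input_spec_lib.assert_input_compatibility(specs, inputs, "demo_layer")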
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: tf_keras-nightly
- Version: 2.21.0.dev2025110310
+ Version: 2.21.0.dev2025110610
  Summary: Deep learning for humans.
  Home-page: https://keras.io/
  Download-URL: https://github.com/keras-team/tf-keras/tags
@@ -1,4 +1,4 @@
- tf_keras/__init__.py,sha256=CIM1inHN-Z4qktEb9YhaQvD6DuRxjYaAsOVXT00uIvA,911
+ tf_keras/__init__.py,sha256=d8FZO9rcuy_mo7EHwcK_lvqhOSmSKrXOrIO0GBT9G7I,911
  tf_keras/__internal__/__init__.py,sha256=OHQbeIC0QtRBI7dgXaJaVbH8F00x8dCI-DvEcIfyMsE,671
  tf_keras/__internal__/backend/__init__.py,sha256=LnMs2A6685gDG79fxqmdulIYlVE_3WmXlBTBo9ZWYcw,162
  tf_keras/__internal__/layers/__init__.py,sha256=F5SGMhOTPzm-PR44VrfinURHcVeQPIEdwnZlAkSTB3A,176
@@ -276,11 +276,11 @@ tf_keras/src/engine/base_layer_utils.py,sha256=AFjqwXM-WShf0dfsyIotlXYIRJlqYyjQh
  tf_keras/src/engine/base_layer_v1.py,sha256=MMfdUKB8tjbjjX9Pj5b6E5XgrM-BnVx0ilSndcR_3QA,102875
  tf_keras/src/engine/base_preprocessing_layer.py,sha256=xne5VVtj9_IE1_cjh-kaPk-utoMY7mYwTOcgybFfY34,12650
  tf_keras/src/engine/compile_utils.py,sha256=F6KxbaXnppns5XCOJl8wzsiQ1riEp43s0G0SWsWAUE0,31757
- tf_keras/src/engine/data_adapter.py,sha256=UqYJBUDiS-vyu7euVYxQrXw0U9-piO7SwTetkGBSMwg,71654
- tf_keras/src/engine/functional.py,sha256=igoSYLFSVYvZ8EUjwbowrDyYywI-V3h9ypeybIN_Jr4,70182
+ tf_keras/src/engine/data_adapter.py,sha256=N5UV4KYF-F7YJdB5kHR8pACSrFlSqQte_DsmA8Ksa6Y,72257
+ tf_keras/src/engine/functional.py,sha256=ojvj0DkGrnqd16dEIqj2AgFlmxB2s6e-3qjs78fN85E,71006
  tf_keras/src/engine/functional_utils.py,sha256=5creFfo9UoG5OLJgkcw9gsfT-qch-RamT5IsU8675rU,11048
- tf_keras/src/engine/input_layer.py,sha256=QVAA9ZrhfUlcx0Tj_UuNF3t1nxYrhyks6vDJJeb18W8,18258
- tf_keras/src/engine/input_spec.py,sha256=W3mojApaM_lN8Vr2MCvddE8RhHckxwObzpySPm16oEM,12076
+ tf_keras/src/engine/input_layer.py,sha256=SRoRYG_PElMRMAypjeQRZ_0Ub4tu5jdRc6ASL59BAsY,18650
+ tf_keras/src/engine/input_spec.py,sha256=H2U8yNz7eabUozm4QAcL3XcQoxj6iAwvK6ecU7w8O5g,12455
  tf_keras/src/engine/keras_tensor.py,sha256=rmIyf-sMKzGAMXzob0hCTZ3qA4JBYyIM85XUdmOPmqQ,28858
  tf_keras/src/engine/node.py,sha256=mevKNFEtzeVbwLRuwB7sMzQGKt6ppIxLmMcfQMzu8N8,14254
  tf_keras/src/engine/partial_batch_padding_handler.py,sha256=TNZvGXL-fvmZLLHIMPX_hy0w9LT8W52DHW7ZtnEvBvI,4325
@@ -584,7 +584,7 @@ tf_keras/src/utils/legacy/__init__.py,sha256=EfMmeHYDzwvxNaktPhQbkTdcPSIGCqMhBND
  tf_keras/utils/__init__.py,sha256=b7_d-USe_EmLo02_P99Q1rUCzKBYayPCfiYFStP-0nw,2735
  tf_keras/utils/experimental/__init__.py,sha256=DzGogE2AosjxOVILQBT8PDDcqbWTc0wWnZRobCdpcec,97
  tf_keras/utils/legacy/__init__.py,sha256=7ujlDa5HeSRcth2NdqA0S1P2-VZF1kB3n68jye6Dj-8,189
- tf_keras_nightly-2.21.0.dev2025110310.dist-info/METADATA,sha256=2R_39i6iH7QwHDZXDz9yKlL-F_AbkhvN4MMOu8uTlaI,1857
- tf_keras_nightly-2.21.0.dev2025110310.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- tf_keras_nightly-2.21.0.dev2025110310.dist-info/top_level.txt,sha256=LC8FK7zHDNKxB17C6lGKvrZ_fZZGJsRiBK23SfiDegY,9
- tf_keras_nightly-2.21.0.dev2025110310.dist-info/RECORD,,
+ tf_keras_nightly-2.21.0.dev2025110610.dist-info/METADATA,sha256=eKYXYpfPfJQeCvRklYOV1Zsk0Z85qBRaCBoXZ6q8DM4,1857
+ tf_keras_nightly-2.21.0.dev2025110610.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ tf_keras_nightly-2.21.0.dev2025110610.dist-info/top_level.txt,sha256=LC8FK7zHDNKxB17C6lGKvrZ_fZZGJsRiBK23SfiDegY,9
+ tf_keras_nightly-2.21.0.dev2025110610.dist-info/RECORD,,