keras-nightly 3.14.0.dev2026012804-py3-none-any.whl → 3.14.0.dev2026012904-py3-none-any.whl

keras/src/quantizers/quantizers.py CHANGED
@@ -1,3 +1,5 @@
+ import math
+
  import ml_dtypes
  import numpy as np

@@ -118,6 +120,190 @@ def abs_max_quantize(
      return outputs, scale


+ @keras_export("keras.quantizers.abs_max_quantize_grouped_with_zero_point")
+ def abs_max_quantize_grouped_with_zero_point(
+     inputs,
+     block_size,
+     value_range=(-8, 7),
+     dtype="int8",
+     epsilon=backend.epsilon(),
+     to_numpy=False,
+ ):
+     """Quantizes a 2D tensor using grouped asymmetric quantization with
+     zero point.
+
+     Groups are formed along axis 0 (the input/contracting dimension).
+     Each group of `block_size` rows gets its own scale factor and zero point
+     per column. This is useful for weight distributions that are not centered
+     around zero.
+
+     Args:
+         inputs: Input tensor to quantize. Shape: `(input_dim, output_dim)`.
+         block_size: Number of elements per group along axis 0.
+         value_range: Tuple of `(min, max)` quantization range.
+         dtype: Data type of quantized output.
+         epsilon: Small value to avoid division by zero.
+         to_numpy: Whether to perform computation in numpy for memory
+             efficiency.
+
+     Returns:
+         A tuple `(quantized_tensor, scale, zero_point)` where:
+         - `quantized_tensor`: Same shape as inputs, dtype=`dtype`.
+         - `scale`: Shape `(n_groups, output_dim)` where
+             `n_groups = ceil(input_dim / block_size)`.
+         - `zero_point`: Shape `(n_groups, output_dim)`, dtype=`int8`.
+
+     Example:
+
+     ```python
+     >>> import numpy as np
+     >>> from keras.quantizers import abs_max_quantize_grouped_with_zero_point
+     >>> kernel = np.random.randn(512, 256).astype("float32")
+     >>> quantized, scale, zero_point = abs_max_quantize_grouped_with_zero_point(
+     ...     kernel, block_size=128, value_range=(-8, 7)
+     ... )
+     >>> quantized.shape
+     (512, 256)
+     >>> scale.shape  # 512 / 128 = 4 groups
+     (4, 256)
+     >>> zero_point.shape
+     (4, 256)
+     ```
+     """
+     if to_numpy:
+         return _abs_max_quantize_grouped_with_zero_point_numpy(
+             inputs, block_size, value_range, dtype, epsilon
+         )
+     return _abs_max_quantize_grouped_with_zero_point_tensor(
+         inputs, block_size, value_range, dtype, epsilon
+     )
+
+
+ def _abs_max_quantize_grouped_with_zero_point_numpy(
+     inputs, block_size, value_range, dtype, epsilon
+ ):
+     """NumPy implementation of grouped asymmetric quantization.
+
+     Uses NumPy for computation to reduce GPU memory usage during
+     model quantization.
+     """
+     original_dtype = backend.standardize_dtype(inputs.dtype)
+     inputs = ops.convert_to_numpy(inputs)
+
+     input_dim, output_dim = inputs.shape
+     n_groups = math.ceil(input_dim / block_size)
+     qmin, qmax = value_range
+
+     # Zero-pad rows so input_dim is divisible by block_size
+     padded_input_dim = n_groups * block_size
+     if padded_input_dim > input_dim:
+         padding = np.zeros(
+             (padded_input_dim - input_dim, output_dim), dtype=inputs.dtype
+         )
+         inputs_padded = np.concatenate([inputs, padding], axis=0)
+     else:
+         inputs_padded = inputs
+
+     inputs_reshaped = inputs_padded.reshape(n_groups, block_size, output_dim)
+
+     # Compute per-group min/max for asymmetric quantization
+     min_val = np.min(inputs_reshaped, axis=1, keepdims=True)
+     max_val = np.max(inputs_reshaped, axis=1, keepdims=True)
+
+     # Scale maps the [min, max] range to [qmin, qmax]
+     scale = np.divide(np.subtract(max_val, min_val) + epsilon, qmax - qmin)
+
+     # Zero point shifts the quantized range to include the original zero
+     zero_point = np.round(np.divide(-min_val, scale)) + qmin
+     zero_point = np.clip(zero_point, qmin, qmax)
+
+     # Quantize: q = round(input / scale) + zero_point
+     outputs = np.round(np.divide(inputs_reshaped, scale)) + zero_point
+     outputs = np.clip(outputs, qmin, qmax)
+     outputs = outputs.astype(dtype)
+
+     # Remove padding and squeeze to (n_groups, output_dim)
+     outputs = outputs.reshape(padded_input_dim, output_dim)[:input_dim, :]
+     scale = np.squeeze(scale, axis=1)
+     zero_point = np.squeeze(zero_point, axis=1).astype("int8")
+
+     return (
+         ops.convert_to_tensor(outputs),
+         ops.convert_to_tensor(scale, dtype=original_dtype),
+         ops.convert_to_tensor(zero_point),
+     )
+
+
+ def _abs_max_quantize_grouped_with_zero_point_tensor(
+     inputs, block_size, value_range, dtype, epsilon
+ ):
+     """Tensor backend implementation of grouped asymmetric quantization."""
+     original_dtype = backend.standardize_dtype(inputs.dtype)
+     inputs = ops.convert_to_tensor(inputs)
+
+     input_shape = ops.shape(inputs)
+     input_dim = input_shape[0]
+     output_dim = input_shape[1]
+     qmin, qmax = value_range
+
+     # Infer bit-width from quantization range (e.g., [-8, 7] -> 4 bits)
+     num_levels = qmax - qmin + 1
+     bits = int(math.log2(num_levels))
+
+     n_groups = int(math.ceil(int(input_dim) / block_size))
+     padded_input_dim = n_groups * block_size
+
+     # Transpose to [out_features, in_features] for
+     # compute_quantization_parameters
+     inputs_t = ops.transpose(inputs)
+
+     # Compute scale and zero point using the unified quantization function
+     scale_t, zero_point_t, _ = compute_quantization_parameters(
+         inputs_t,
+         bits=bits,
+         symmetric=False,
+         per_channel=True,
+         group_size=block_size,
+         compute_dtype=original_dtype,
+         epsilon=epsilon,
+         signed=True,
+     )
+
+     # Transpose results back to (n_groups, output_dim)
+     scale = ops.transpose(scale_t)
+     zero_point = ops.transpose(zero_point_t)
+
+     # Zero-pad rows so input_dim is divisible by block_size
+     pad_size = padded_input_dim - int(input_dim)
+     if pad_size > 0:
+         padding = ops.zeros((pad_size, output_dim), dtype=inputs.dtype)
+         inputs_padded = ops.concatenate([inputs, padding], axis=0)
+     else:
+         inputs_padded = inputs
+
+     inputs_reshaped = ops.reshape(
+         inputs_padded, (n_groups, block_size, output_dim)
+     )
+
+     # Expand scale and zero_point for broadcasting across block_size
+     scale_expanded = ops.expand_dims(scale, axis=1)
+     zero_point_expanded = ops.expand_dims(zero_point, axis=1)
+
+     # Quantize: q = round(input / scale) + zero_point
+     outputs = ops.add(
+         ops.round(ops.divide(inputs_reshaped, scale_expanded)),
+         zero_point_expanded,
+     )
+     outputs = ops.clip(outputs, qmin, qmax)
+     outputs = ops.cast(outputs, dtype)
+
+     # Remove padding
+     outputs = ops.reshape(outputs, (padded_input_dim, output_dim))
+     outputs = outputs[:input_dim, :]
+
+     return outputs, scale, zero_point
+
+
  @keras_export("keras.quantizers.AbsMaxQuantizer")
  class AbsMaxQuantizer(Quantizer):
      def __init__(
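
Note: the inverse of the mapping introduced above is `x ≈ (q - zero_point) * scale`, applied per group of `block_size` rows. A minimal NumPy sketch of that round trip, assuming the `(n_groups, output_dim)` scale/zero-point layout documented in the new function (`dequantize_grouped` is an illustrative helper, not part of this diff):

```python
import numpy as np

def dequantize_grouped(quantized, scale, zero_point, block_size):
    # quantized: (input_dim, output_dim); scale, zero_point: (n_groups, output_dim)
    input_dim, _ = quantized.shape
    group_of_row = np.arange(input_dim) // block_size  # row -> group index
    s = scale[group_of_row]                            # broadcast group params over rows
    z = zero_point[group_of_row].astype(scale.dtype)
    return (quantized.astype(scale.dtype) - z) * s
```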
@@ -796,6 +982,8 @@ def compute_quantization_parameters(
      per_channel=False,
      group_size=-1,
      compute_dtype="float32",
+     epsilon=0.0,
+     signed=False,
  ):
      """
      Computes the scale and zero-point for quantizing weight tensors.
@@ -816,10 +1004,17 @@ def compute_quantization_parameters(
          per_channel: bool. Whether to quantize per channel.
          group_size: int. The group size for quantization. -1 means no grouping.
          compute_dtype: str. The dtype for computation. Defaults to "float32".
+         epsilon: float. Small value added to (max - min) before computing
+             scale to avoid division by zero. Defaults to 0.0.
+         signed: bool. Whether to use signed quantization range. If True, uses
+             range [-2^(bits-1), 2^(bits-1)-1] (e.g., [-8, 7] for 4-bit).
+             If False, uses range [0, 2^bits-1] (e.g., [0, 15] for 4-bit).
+             Defaults to False.

      Returns:
          scale: KerasTensor. The scale tensor for quantization.
-         zero: KerasTensor. The zero tensor for quantization.
+         zero: KerasTensor. The zero tensor for quantization (int8 if signed,
+             uint8 if unsigned).
          maxq: scalar. The maximum quantization value.
      """
      # Input validation
@@ -874,13 +1069,31 @@ def compute_quantization_parameters(

      # Compute scale and zero-point
      maxq = ops.cast(ops.subtract(ops.power(2, bits), 1), compute_dtype)
-     scale = ops.divide(ops.subtract(max_values, min_values), maxq)
+     range_values = ops.subtract(max_values, min_values)
+     if epsilon > 0:
+         range_values = ops.add(range_values, epsilon)
+     scale = ops.divide(range_values, maxq)
      scale = ops.where(ops.less_equal(scale, 0), 1e-8, scale)

-     if symmetric:
-         zero = ops.full_like(scale, ops.divide(ops.add(maxq, 1), 2))
+     # Compute zero-point based on signed/unsigned mode
+     if signed:
+         # For signed range [-2^(bits-1), 2^(bits-1)-1], e.g., [-8, 7] for 4-bit
+         qmin = -(2 ** (bits - 1))  # e.g., -8 for 4-bit
+         qmax_signed = 2 ** (bits - 1) - 1  # e.g., 7 for 4-bit
+         if symmetric:
+             zero = ops.full_like(scale, ops.divide(ops.add(maxq, 1), 2) + qmin)
+         else:
+             # zero_signed = round(-min / scale) + qmin
+             zero = ops.add(
+                 ops.round(ops.divide(ops.negative(min_values), scale)), qmin
+             )
+             zero = ops.clip(zero, qmin, qmax_signed)
      else:
-         zero = ops.round(ops.divide(ops.negative(min_values), scale))
+         # For unsigned range [0, 2^bits-1], e.g., [0, 15] for 4-bit
+         if symmetric:
+             zero = ops.full_like(scale, ops.divide(ops.add(maxq, 1), 2))
+         else:
+             zero = ops.round(ops.divide(ops.negative(min_values), scale))

      # Reshape output to [out_features, n_groups] or [out_features, 1]
      if n_groups > 1:
@@ -893,7 +1106,8 @@ def compute_quantization_parameters(
          scale = ops.tile(ops.reshape(scale, (1, 1)), (out_features, 1))
          zero = ops.tile(ops.reshape(zero, (1, 1)), (out_features, 1))

-     return scale, ops.cast(zero, "uint8"), maxq
+     zero_dtype = "int8" if signed else "uint8"
+     return scale, ops.cast(zero, zero_dtype), maxq


  def quantize_with_zero_point(input_tensor, scale, zero, maxq):
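
Both branches above spend the same number of quantization levels; `signed` only shifts the window by `qmin`. A quick worked check of the ranges named in the comments, for the 4-bit case:

```python
bits = 4
maxq = 2**bits - 1                 # 15 -> unsigned range [0, 15]
qmin = -(2 ** (bits - 1))          # -8
qmax_signed = 2 ** (bits - 1) - 1  # 7 -> signed range [-8, 7]
assert maxq == qmax_signed - qmin  # both ranges span 16 levels
```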
@@ -942,51 +1156,67 @@ def dequantize_with_zero_point(input_tensor, scale, zero):
      )


- def quantize_with_sz_map(weights_matrix, scale, zero, g_idx, maxq):
+ def quantize_with_sz_map(
+     weights_matrix, scale, zero, g_idx, maxq, group_axis=-1
+ ):
      """Quantize the weight matrix from group params.

      This function uses the provided scale and zero tensors to quantize the
-     input weights_matrix according to the group indices. It maps each column
-     of the weights_matrix to its corresponding group parameters and performs
-     the quantization operation.
+     input weights_matrix according to the group indices. It maps each position
+     along group_axis of the weights_matrix to its corresponding group
+     parameters and performs the quantization operation.

      Args:
-         weights_matrix: 2D tensor of shape [out_features, in_features].
-         scale: Per-group scale tensor of shape [out_features, n_groups].
-         zero: Per-group zero-point tensor of shape [out_features, n_groups].
-         g_idx: Integer tensor of shape [in_features,] mapping each column to
-             its group index.
+         weights_matrix: Tensor to quantize.
+         scale: Per-group scale tensor with n_groups along group_axis.
+         zero: Per-group zero-point tensor with n_groups along group_axis.
+         g_idx: 1D integer tensor of length equal to the size of
+             `weights_matrix` along the dimension being quantized. Each
+             element specifies which group index (0 to n_groups-1) that
+             position belongs to. For example, with 128 columns and
+             group_size=32, g_idx would be
+             `[0,0,...,0, 1,1,...,1, 2,2,...,2, 3,3,...,3]` (32 of each).
          maxq: Scalar (float) representing the maximum integer quantization
              level (e.g., 2^bits - 1).
+         group_axis: The axis in `scale` and `zero` along which to index
+             using `g_idx`. This determines which dimension of the
+             scale/zero tensors contains the per-group values. Default: -1
+             (last axis).

      Returns:
          A tensor with the same shape as `weights_matrix` containing the
          quantized weights produced using the provided group parameters.
      """
      groups = ops.cast(g_idx, "int32")
-     scale_cols = ops.take(scale, groups, axis=1)  # [out_features, in_features]
-     zero_cols = ops.take(zero, groups, axis=1)  # [out_features, in_features]
+     scale_cols = ops.take(scale, groups, axis=group_axis)
+     zero_cols = ops.take(zero, groups, axis=group_axis)

      # Quantize elementwise, then cast to int
      return quantize_with_zero_point(weights_matrix, scale_cols, zero_cols, maxq)


- def dequantize_with_sz_map(weights_matrix, scale, zero, g_idx):
+ def dequantize_with_sz_map(weights_matrix, scale, zero, g_idx, group_axis=-1):
      """Rebuild a dequantized weight matrix from group params.

      This function uses the provided scale and zero tensors to dequantize the
-     input weights_matrix according to the group indices. It maps each column
-     of the weights_matrix to its corresponding group parameters and performs
-     the dequantization operation.
+     input weights_matrix according to the group indices. It maps each position
+     along group_axis of the weights_matrix to its corresponding group
+     parameters and performs the dequantization operation.

      Args:
-         weights_matrix: 2D tensor of shape [out_features, in_features].
-         scale: Per-group scale tensor of shape [out_features, n_groups].
-         zero: Per-group zero-point tensor of shape [out_features, n_groups].
-         g_idx: Integer tensor of shape [in_features,] mapping each column to
-             its group index.
-         maxq: Scalar (float) representing the maximum integer quantization
-             level (e.g., 2^bits - 1).
+         weights_matrix: Tensor to dequantize.
+         scale: Per-group scale tensor with n_groups along group_axis.
+         zero: Per-group zero-point tensor with n_groups along group_axis.
+         g_idx: 1D integer tensor of length equal to the size of
+             `weights_matrix` along the dimension being dequantized. Each
+             element specifies which group index (0 to n_groups-1) that
+             position belongs to. For example, with 128 columns and
+             group_size=32, g_idx would be
+             `[0,0,...,0, 1,1,...,1, 2,2,...,2, 3,3,...,3]` (32 of each).
+         group_axis: The axis in `scale` and `zero` along which to index
+             using `g_idx`. This determines which dimension of the
+             scale/zero tensors contains the per-group values. Default: -1
+             (last axis).

      Returns:
          A tensor with the same shape as `weights_matrix` containing the
@@ -994,12 +1224,12 @@ def dequantize_with_sz_map(weights_matrix, scale, zero, g_idx):
      """
      # Map group indices to scales and zeros
      groups = ops.cast(g_idx, "int32")
-     scales_mapped = ops.take(scale, groups, axis=1)
-     zeros_mapped = ops.take(zero, groups, axis=1)
+     scales_mapped = ops.take(scale, groups, axis=group_axis)
+     zeros_mapped = ops.take(zero, groups, axis=group_axis)
      zeros_mapped = ops.cast(zeros_mapped, scales_mapped.dtype)

-     quantized = ops.multiply(
+     dequantized = ops.multiply(
          ops.subtract(weights_matrix, zeros_mapped), scales_mapped
      )

-     return quantized
+     return dequantized
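
The contiguous `g_idx` layout described in both docstrings can be built with integer division; a small sketch matching the 128-column, group_size=32 example given there:

```python
import numpy as np

in_features, group_size = 128, 32
g_idx = np.arange(in_features) // group_size  # 32 zeros, 32 ones, 32 twos, 32 threes
assert int(g_idx.max()) + 1 == in_features // group_size  # 4 groups
```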
keras/src/saving/saving_api.py CHANGED
@@ -121,10 +121,11 @@ def save_model(model, filepath, overwrite=True, zipped=None, **kwargs):

  @keras_export(["keras.saving.load_model", "keras.models.load_model"])
  def load_model(filepath, custom_objects=None, compile=True, safe_mode=True):
-     """Loads a model saved via `model.save()`.
+     """Loads a model saved via `model.save()` or from an Orbax checkpoint.

      Args:
-         filepath: `str` or `pathlib.Path` object, path to the saved model file.
+         filepath: `str` or `pathlib.Path` object, path to the saved model file
+             or Orbax checkpoint directory.
          custom_objects: Optional dictionary mapping names
              (strings) to custom classes or functions to be
              considered during deserialization.
@@ -195,6 +196,16 @@ def load_model(filepath, custom_objects=None, compile=True, safe_mode=True):
              compile=compile,
              safe_mode=safe_mode,
          )
+
+     # Check for Orbax checkpoint directory using utility function
+     if is_orbax_checkpoint(filepath):
+         return _load_model_from_orbax_checkpoint(
+             filepath,
+             custom_objects=custom_objects,
+             compile=compile,
+             safe_mode=safe_mode,
+         )
+
      elif str(filepath).endswith(".keras"):
          raise ValueError(
              f"File not found: filepath={filepath}. "
@@ -337,3 +348,56 @@ def load_weights(model, filepath, skip_mismatch=False, **kwargs):
          "`.weights.h5` files, legacy H5 format files "
          "(`.h5` extension), or Orbax checkpoints."
      )
+
+
+ def _load_model_from_orbax_checkpoint(
+     filepath, custom_objects=None, compile=True, safe_mode=True
+ ):
+     """Load a model from an Orbax checkpoint directory."""
+
+     from keras.src.utils.module_utils import ocp
+
+     # Ensure orbax is available
+     ocp.initialize()
+
+     # Find the latest checkpoint step using the utility function
+     checkpoint_path = find_latest_orbax_checkpoint(filepath)
+     step = int(os.path.basename(checkpoint_path))
+
+     # Load the composite state efficiently
+     checkpointer = ocp.training.Checkpointer(directory=filepath)
+     with ocp.Context():
+         composite_state = checkpointer.load_pytree(step)
+
+     # Validate and extract model config
+     if "model_config" not in composite_state:
+         raise ValueError(
+             "Checkpoint does not contain model configuration. "
+             "This checkpoint may have been saved with save_weights_only=True."
+         )
+
+     # Create and build model from config using saving_lib helper
+     # This properly handles shared objects and compile_config
+     model = saving_lib._model_from_config(
+         composite_state["model_config"],
+         custom_objects=custom_objects,
+         compile=compile,
+         safe_mode=safe_mode,
+     )
+
+     # Prepare state tree with only variable keys for set_state_tree
+     variable_keys = [
+         "trainable_variables",
+         "non_trainable_variables",
+         "optimizer_variables",
+         "metrics_variables",
+     ]
+     state_tree = {
+         key: composite_state[key]
+         for key in variable_keys
+         if key in composite_state
+     }
+
+     # Apply the loaded state to the model
+     model.set_state_tree(state_tree)
+     return model
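
With this change, `keras.saving.load_model` also accepts an Orbax checkpoint directory and restores both the architecture and the variable state tree, assuming the checkpoint was written with the model config included (i.e., not with `save_weights_only=True`). A hedged usage sketch; the path below is a placeholder:

```python
import keras

# "/tmp/orbax_ckpt" stands in for a real Orbax checkpoint directory.
model = keras.saving.load_model("/tmp/orbax_ckpt")
```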
keras/src/version.py CHANGED
@@ -1,7 +1,7 @@
  from keras.src.api_export import keras_export

  # Unique source of truth for the version number.
- __version__ = "3.14.0.dev2026012804"
+ __version__ = "3.14.0.dev2026012904"


  @keras_export("keras.version")
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: keras-nightly
- Version: 3.14.0.dev2026012804
+ Version: 3.14.0.dev2026012904
  Summary: Multi-backend Keras
  Author-email: Keras team <keras-users@googlegroups.com>
  License: Apache License 2.0
@@ -35,7 +35,7 @@ keras/_tf_keras/keras/datasets/mnist/__init__.py,sha256=tCUxwxWlcOGsTQzgysuC2kVv
  keras/_tf_keras/keras/datasets/reuters/__init__.py,sha256=aY43YfZVbCRMIRNfycxdKPDjq8BW8MZSA_fYQjonuY0,330
  keras/_tf_keras/keras/distillation/__init__.py,sha256=7pnduBNlDNCQFxm8yhGeYw8jKjOhK2l-F3iOjIFCmNk,497
  keras/_tf_keras/keras/distribution/__init__.py,sha256=sPbRKFpWd8mZypqWAId06KJzznVpkT_4Ae5QTcBJxa8,1063
- keras/_tf_keras/keras/dtype_policies/__init__.py,sha256=c8mnzGQbn3b5EQ6v6zBnrkgs9EFcrPRi5Zv4TEVcolg,969
+ keras/_tf_keras/keras/dtype_policies/__init__.py,sha256=o10diNOYl4_umVihjLtgkBbCqgpKOddw5vWczArATjg,1063
  keras/_tf_keras/keras/export/__init__.py,sha256=Qtde9Kh4AUm-pBmKL4L90ooJxo5EFVEW8i7LYxA_mVQ,194
  keras/_tf_keras/keras/initializers/__init__.py,sha256=Bg5r2XRraWXldFSlWNu1kNnp0g1sQt9vfcT8fvmXaeg,3371
  keras/_tf_keras/keras/layers/__init__.py,sha256=w0Av9SmIfUOFvKiDXV5t7IWfpoagNwLX2hgg3C-pTSs,16237
@@ -57,7 +57,7 @@ keras/_tf_keras/keras/preprocessing/__init__.py,sha256=8gjf16CnMiBJFp2E05iRJaHze
  keras/_tf_keras/keras/preprocessing/image/__init__.py,sha256=MEyK0JU7piXc1ql8ZTtHJuC907Q6DV5uOQqpivKKTn4,1656
  keras/_tf_keras/keras/preprocessing/sequence/__init__.py,sha256=TymwLKMEwqR6JhVFDhU80Hf8GVMMwg2vD6-pJqh5NuA,479
  keras/_tf_keras/keras/preprocessing/text/__init__.py,sha256=g3ej5_e86BY1AhlQwjalIQq_xgCMmCcDMtsh27diUNw,543
- keras/_tf_keras/keras/quantizers/__init__.py,sha256=kDf-LP5ola_TxzeClJTqSS42k4mzF-BcP5zh7Xh7miE,1652
+ keras/_tf_keras/keras/quantizers/__init__.py,sha256=KBYALSRrerYJSd_Rht8etlSdY5tKo8nAbDgvVrfF9CU,1790
  keras/_tf_keras/keras/random/__init__.py,sha256=qDZQXrw0oYVNc2KTmcmcgon61lQJBOXqF-6PMInBvec,763
  keras/_tf_keras/keras/regularizers/__init__.py,sha256=542Shphw7W8h4Dyf2rmqMKUECVZ8IVBvN9g1LWhz-b4,923
  keras/_tf_keras/keras/saving/__init__.py,sha256=KvL2GZxjvgFgEhvEnkvqjIR9JSNHKz-NWZacXajsjLI,1298
@@ -101,7 +101,7 @@ keras/datasets/mnist/__init__.py,sha256=tCUxwxWlcOGsTQzgysuC2kVvX01zkGOa9ABEb1Ek
  keras/datasets/reuters/__init__.py,sha256=aY43YfZVbCRMIRNfycxdKPDjq8BW8MZSA_fYQjonuY0,330
  keras/distillation/__init__.py,sha256=7pnduBNlDNCQFxm8yhGeYw8jKjOhK2l-F3iOjIFCmNk,497
  keras/distribution/__init__.py,sha256=sPbRKFpWd8mZypqWAId06KJzznVpkT_4Ae5QTcBJxa8,1063
- keras/dtype_policies/__init__.py,sha256=c8mnzGQbn3b5EQ6v6zBnrkgs9EFcrPRi5Zv4TEVcolg,969
+ keras/dtype_policies/__init__.py,sha256=o10diNOYl4_umVihjLtgkBbCqgpKOddw5vWczArATjg,1063
  keras/export/__init__.py,sha256=Qtde9Kh4AUm-pBmKL4L90ooJxo5EFVEW8i7LYxA_mVQ,194
  keras/initializers/__init__.py,sha256=Bg5r2XRraWXldFSlWNu1kNnp0g1sQt9vfcT8fvmXaeg,3371
  keras/layers/__init__.py,sha256=mteqKdCYQq1NWH-XOYoy1B2uJFmJkG2XPHJIOnUjLOg,16069
@@ -122,13 +122,13 @@ keras/optimizers/schedules/__init__.py,sha256=pQF3rQiAPuUSTUdflTr-fpL77oyGIv9xzG
  keras/preprocessing/__init__.py,sha256=N-_Rr6pYag2H_kEn6imVuol_hNL3NL65YL_zvdOV1mU,580
  keras/preprocessing/image/__init__.py,sha256=AmkgEp_-MvtIefySVEXv1IQ5_LyojjBfnIlRcUvNc40,451
  keras/preprocessing/sequence/__init__.py,sha256=zTMj_m6LWipe_hVq6SjE4JPj7eYKuUOZyh45g756cFg,196
- keras/quantizers/__init__.py,sha256=kDf-LP5ola_TxzeClJTqSS42k4mzF-BcP5zh7Xh7miE,1652
+ keras/quantizers/__init__.py,sha256=KBYALSRrerYJSd_Rht8etlSdY5tKo8nAbDgvVrfF9CU,1790
  keras/random/__init__.py,sha256=qDZQXrw0oYVNc2KTmcmcgon61lQJBOXqF-6PMInBvec,763
  keras/regularizers/__init__.py,sha256=542Shphw7W8h4Dyf2rmqMKUECVZ8IVBvN9g1LWhz-b4,923
  keras/saving/__init__.py,sha256=KvL2GZxjvgFgEhvEnkvqjIR9JSNHKz-NWZacXajsjLI,1298
  keras/src/__init__.py,sha256=Gi4S7EiCMkE03PbdGNpFdaUYySWDs_FcAJ8Taz9Y1BE,684
  keras/src/api_export.py,sha256=gXOkBOnmscV013WAc75lc4Up01-Kkg9EylIAT_QWctg,1173
- keras/src/version.py,sha256=19dG5h6eOc-VKk0odlobQO7lPSyHV89NVu3r2Y6bzco,204
+ keras/src/version.py,sha256=L6aU5qNyjfVqWFQMoyXdfGkHRVpx3Z4Axvr6FTCf4N8,204
  keras/src/activations/__init__.py,sha256=0nL3IFDB9unlrMz8ninKOWo-uCHasTUpTo1tXZb2u44,4433
  keras/src/activations/activations.py,sha256=mogPggtp4CGldI3VOPNmesRxp6EbiR1_i4KLGaVwzL8,17614
  keras/src/applications/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -163,7 +163,7 @@ keras/src/backend/common/symbolic_scope.py,sha256=RfrfOAv2cbiZai-L6tHwir2WUpJhS6
  keras/src/backend/common/tensor_attributes.py,sha256=X5sYeGDu9YmVBIn8oX31IeE-v-bxjq2ovmIjLrVOa8g,1161
  keras/src/backend/common/variables.py,sha256=ENq8lbwCGoIggC3Ef92Ea9G7ej4NZJnomvJZYwp8BaI,24467
  keras/src/backend/jax/__init__.py,sha256=l_HMwAZ3oAV4Etnw9RPqbvLYPPs3CZYbgaLd_qy36ps,1495
- keras/src/backend/jax/core.py,sha256=C6uwztGtL2j0vCDslroAv9P4yTxVxuHfBiSpguNuGeE,22721
+ keras/src/backend/jax/core.py,sha256=pwsVWZF63o47StwMdwm9tRQapX_EHNnMt4r8Xe37gIg,23132
  keras/src/backend/jax/distribution_lib.py,sha256=pwUsC_p_z73-5pG0_nI1tbJ9cqMqd93-ROIteEEbY7w,9258
  keras/src/backend/jax/export.py,sha256=jV2yKQLzYjK72vTJmdNomWPLeNS_lDTCEKzQx_5D_-E,7368
  keras/src/backend/jax/image.py,sha256=RiYIalbIaUQdDOGpDZUBk5KNsX94Xqg7iyXGATN9V58,30482
@@ -254,7 +254,7 @@ keras/src/callbacks/lambda_callback.py,sha256=q-nNr_k7MyYRP3HIetFsutcLkq78cUYxDD
  keras/src/callbacks/learning_rate_scheduler.py,sha256=II0SLxltUX3omRbGTYffd9KTWLRKtzW57SDRe70_t7E,2965
  keras/src/callbacks/model_checkpoint.py,sha256=Jt2mMKHKx0isrQnhiHADDOmwu72J594m93PBHy-zpV8,18570
  keras/src/callbacks/monitor_callback.py,sha256=-QBKqkKJ7Rg6L40Q80IScpvybmLoodLWcJoAgnTe_c4,4184
- keras/src/callbacks/orbax_checkpoint.py,sha256=hG_OClsm4lYQVTyCLWLJqgdYl6OXtFjz0J6z5eUtsmY,12529
+ keras/src/callbacks/orbax_checkpoint.py,sha256=aO138hfFOsU48GiYNYy0r__9DpmpbMxG9CGMX2CSc9Y,13931
  keras/src/callbacks/progbar_logger.py,sha256=BqddKoOyc8vxxtKriq5QD3n5JhVPUxkuWF2u1UlCriQ,3104
  keras/src/callbacks/reduce_lr_on_plateau.py,sha256=isJ9EzVo8jIu-_kWTFHpM_gaI5PbHTcUBM0keR9FRHA,4766
  keras/src/callbacks/remote_monitor.py,sha256=VDbNzCdddCDe_ZoeVvwV50oJkwOehhT_IDDYD8LzFOg,2727
@@ -278,8 +278,8 @@ keras/src/distillation/distillation_loss.py,sha256=3hhDKWhNHoLMa-EaBbJfa0flS6QoU
  keras/src/distillation/distiller.py,sha256=GI_yJ5RTgdXPEZoQwNe4Ds04UXP7obB0tJTqmUbTOa4,22984
  keras/src/distribution/__init__.py,sha256=pseLHx387oTmXROr95tU7kNWjPL8-JB4kZs8nUHsOiU,718
  keras/src/distribution/distribution_lib.py,sha256=zZbKxdL0sHJXSJNCd02qG3sVrq1F3x_JPota9Jlk6iM,34542
- keras/src/dtype_policies/__init__.py,sha256=8Ju8ICwTDdBfBrSoL6kmMzrcgMwMg6dPa1UPMDZKTqc,3717
- keras/src/dtype_policies/dtype_policy.py,sha256=3e2J589g0NUALz0LycSb3anh47g5LWIgn0jKR26B-Cs,19143
+ keras/src/dtype_policies/__init__.py,sha256=0K3I6VkP6XZ60srUSitXA9Z2PxKsYEbJKFv_xs1Gi1I,3804
+ keras/src/dtype_policies/dtype_policy.py,sha256=Wfg6P4PRYH7p2QS_TvySpGOjRvcm-G-Q3fsohEwXYfY,21922
  keras/src/dtype_policies/dtype_policy_map.py,sha256=DqDYlssUGSiTqawPpaVRvR6ljYD8DJrFERCxXVVFvBE,10840
  keras/src/export/__init__.py,sha256=wQfjXEPN1YO2n0gz-7Eme0y_vq86s3SEWkZgs534sns,366
  keras/src/export/export_utils.py,sha256=DpfA5yI37gaMjyESxGTlf7aQ8FhYp0u8LQKxyKiFaoU,5585
@@ -324,14 +324,14 @@ keras/src/layers/convolutional/depthwise_conv2d.py,sha256=rnCd_S3UVeNdVotjKW1Wlo
  keras/src/layers/convolutional/separable_conv1d.py,sha256=vL5qzdaSOOTgyn1A6y9IZZbQOEeB6FedPk9JJI5wqSY,6452
  keras/src/layers/convolutional/separable_conv2d.py,sha256=ZkLOnA6l5UV3GuJufwlOHMOm1S-xkt6sdF-qmP4PDjw,6533
  keras/src/layers/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- keras/src/layers/core/dense.py,sha256=oe0vpcYAMeulF39TrQAeSm-PrNafrpqj6GPlOFQ-eq4,40477
- keras/src/layers/core/einsum_dense.py,sha256=XM58eGQjLSSPIbOdzFmcehJ12eBSGPdRnZ04Qjzwvqs,69554
- keras/src/layers/core/embedding.py,sha256=88x_dkaDlGD-3WtGJf58Us68q2ub8SY36ln9Cz3G75Q,22646
+ keras/src/layers/core/dense.py,sha256=JqnencNYXdXt94pG0JUlE_iKY7pviJuZS_TGDl5nKeM,48253
+ keras/src/layers/core/einsum_dense.py,sha256=9f8_ipGJKyJpqSvVQhH5UIUi2pUQXfv0Mh6cDCyCDtw,76601
+ keras/src/layers/core/embedding.py,sha256=y5IebiVSmVWRTVV0qcwENSSVUC-G2WMtaK79YBgY9e0,30709
  keras/src/layers/core/identity.py,sha256=o0gLHlXL7eNJEbXIgIsgBsZX97K6jN9n3qPXprkXQ9Y,848
  keras/src/layers/core/input_layer.py,sha256=RQn1KHtUd6fPBPL9rs46X8KHmr1eGo7moLg8U5tlVl0,8168
  keras/src/layers/core/lambda_layer.py,sha256=Wplek4hOwh_rwXz4_bpz0pXzKe26ywz52glh5uD0l4w,9272
  keras/src/layers/core/masking.py,sha256=g-RrZ_P50Surh_KGlZQwy2kPNLsop0F8voU4SG2MQkw,2856
- keras/src/layers/core/reversible_embedding.py,sha256=nDQ7b-xi9YlHxYZv0Vif0qr3bfu3LAWR_d1KrDsMawI,16155
+ keras/src/layers/core/reversible_embedding.py,sha256=2Cqu4kPC7HoHgDBEaxUuggylZoTdBfNjOSPm3SpxJ84,22446
  keras/src/layers/core/wrapper.py,sha256=KIdDBuk24V9rAn97-HUUKQ0JMx9Eyd0q9W4qQFaYNt8,1509
  keras/src/layers/merging/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  keras/src/layers/merging/add.py,sha256=icbh3RwZ3QUP3bFNCi7GbrHj2hFdKu1Dsv8djSa13co,2150
@@ -375,7 +375,7 @@ keras/src/layers/pooling/max_pooling3d.py,sha256=xVsJd6KPyu1m9jCVuwT3MZwpwT27TSx
  keras/src/layers/preprocessing/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  keras/src/layers/preprocessing/category_encoding.py,sha256=RjgVkFi9og1hHNmDu2DuKVQOO2Yd-LSXKJDSSzG876M,6927
  keras/src/layers/preprocessing/data_layer.py,sha256=5bO2Pjs-mOzn4z5OnwBbbF3shOczLmimYVym9UL0Mcc,5766
- keras/src/layers/preprocessing/discretization.py,sha256=vsBQbIqlore_NlU0LvtqiDriCXguD_CZH7tK91LBhGM,14056
+ keras/src/layers/preprocessing/discretization.py,sha256=jggZ3irXWaybPGH6jdmbohHo6krl48II-RtsLYa-0Vk,15154
  keras/src/layers/preprocessing/feature_space.py,sha256=YcHzD8C5eqUDRF8khLapwc8qCmbJSfix16KyzqEu1II,30568
  keras/src/layers/preprocessing/hashed_crossing.py,sha256=uwOTKPsv2UweHuGiF4V5HFRgYnjP8N0_S6qT3JP5KeQ,8481
  keras/src/layers/preprocessing/hashing.py,sha256=3k1L_2d_bROHxZNjDbfURRBSFzFBIHFj0tEXCobcS8w,11188
@@ -527,15 +527,15 @@ keras/src/optimizers/rmsprop.py,sha256=DCbmmViUnYCHMCO9YCtC2wGzPXxNPBJhkpwAmROOz
  keras/src/optimizers/sgd.py,sha256=_3xanWOI0s2dISxEVT7i_tehsWakQQz2y480Iwkonas,4396
  keras/src/optimizers/schedules/__init__.py,sha256=vuUuHNTev8sD2-swsuq7zqyYbmaOhDyiIE6F3dGGSZU,546
  keras/src/optimizers/schedules/learning_rate_schedule.py,sha256=WI5QuaWFsEFJhRfLy0KCmkxKwGBMnmgMLYsWC_4YbCo,35828
- keras/src/quantizers/__init__.py,sha256=3LlZ1Z5G5mYVdrZ2xnoFgW28OFneYc_Ys2dzuJ3S0nk,2459
+ keras/src/quantizers/__init__.py,sha256=3nlkoq0aJsR-6OA23MA4WAjebCOIXT4ZH73JPg69esQ,2761
  keras/src/quantizers/awq.py,sha256=i7loWty9LEzfP04_FAyrRbKEXShkoQeScLNSuxRVKu8,13334
  keras/src/quantizers/awq_config.py,sha256=jPD8-SRmWn_uHd1YtUEfI6V8fxmylOF8btUJMerVaEs,5701
  keras/src/quantizers/awq_core.py,sha256=AJgbySMvSwENM1u-e08qb00mX5ub3egUrs677UdYKeQ,7640
  keras/src/quantizers/gptq.py,sha256=ph6e-mzjxD0gGn98KiDS48muiScnfFvLnKFCbo1Ik7o,20123
  keras/src/quantizers/gptq_config.py,sha256=zpPWsbfAdYZArhk_alSnFK-nBj92bdJZBzkSM1MKl5g,8925
  keras/src/quantizers/gptq_core.py,sha256=EKhdTXZQ1uo45KYJcO0h9bMTbVZH4pmqs4irQy9r47o,16945
- keras/src/quantizers/quantization_config.py,sha256=8jGV1lzLC-gk37U4By2ol0QJ9T3LNuMynet40xWlxjg,8117
- keras/src/quantizers/quantizers.py,sha256=QzImbGjVaa6pISxhOJPwq7seQC4s_EToI5JHKp0gbkk,36698
+ keras/src/quantizers/quantization_config.py,sha256=sdYwrwO-KGHVKYbbzIMI6tbKJ80CDT9GzCBqOWSVACg,12026
+ keras/src/quantizers/quantizers.py,sha256=4K4Sh6EqHQ4atFn6sDsEce3WKX1e0iZaGGaZgCaB1WE,45390
  keras/src/quantizers/utils.py,sha256=i6e5MobXrQeKA6zFenjzUNoDDWRGL9bcfgdbE_-0IeM,672
  keras/src/random/__init__.py,sha256=BmXVYPzxbhADohoLtAEEzB3cesP7YBFDsp1qc6BWWlg,420
  keras/src/random/random.py,sha256=bUADZIVDuCghwIWTk0qBxXTxUdiNGWIdsRi8QJ3ePg4,17581
@@ -547,7 +547,7 @@ keras/src/saving/file_editor.py,sha256=mNlXakl58wPC7nWRAVTh-esQ0UN2x-q5piI2ZXYQ8
  keras/src/saving/keras_saveable.py,sha256=aGIt1ajtsaamfUq18LM6ql8JEoQzi3HwzJEuwQ9bmKE,1285
  keras/src/saving/object_registration.py,sha256=OOO-7-SNfPoFkFsR_c5jzE6aSIDIlHlnMcm9IlI_Gbs,7357
  keras/src/saving/orbax_util.py,sha256=ArJI9hQODUyyvzCiXt8AS3VH6E4SL0vF02-RHBk30gU,1621
- keras/src/saving/saving_api.py,sha256=PMkxXhtNNKX8GlwIsCP8-Plt19M012wNEk7i8BhxWzo,12670
+ keras/src/saving/saving_api.py,sha256=X-zsTum57M3AWf_cYqhq_o5wgLcFxNh1ditoxbL_0LY,14694
  keras/src/saving/saving_lib.py,sha256=bRI8TeNOlflTfX3njSkkwNv-VYip-OW7ienIm0lL96I,58920
  keras/src/saving/serialization_lib.py,sha256=yzCTm8hin__MGA2N5M5F-8Zbts5ZJVmINbrH4wEtIwI,30334
  keras/src/testing/__init__.py,sha256=7vVsV7Rn3rG99DdURgnH8ncpxagRwIE0uhH-R4qDyok,315
@@ -618,7 +618,7 @@ keras/utils/bounding_boxes/__init__.py,sha256=jtvQll4u8ZY0Z96HwNhP1nxWEG9FM3gI-6
  keras/utils/legacy/__init__.py,sha256=oSYZz6uS8UxSElRaaJYWJEoweJ4GAasZjnn7fNaOlog,342
  keras/visualization/__init__.py,sha256=UKWmiy6sps4SWlmQi9WX8_Z53cPpLlphz2zIeHdwJpQ,722
  keras/wrappers/__init__.py,sha256=QkS-O5K8qGS7C3sytF8MpmO6PasATpNVGF8qtb7Ojsw,407
- keras_nightly-3.14.0.dev2026012804.dist-info/METADATA,sha256=PHIKEROkWa1oJK381wl7eaROrHFpk2-e2N8yNGMRLfI,6339
- keras_nightly-3.14.0.dev2026012804.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
- keras_nightly-3.14.0.dev2026012804.dist-info/top_level.txt,sha256=ptcw_-QuGZ4ZDjMdwi_Z0clZm8QAqFdvzzFnDEOTs9o,6
- keras_nightly-3.14.0.dev2026012804.dist-info/RECORD,,
+ keras_nightly-3.14.0.dev2026012904.dist-info/METADATA,sha256=0pSnPO1OPi8catYTDRABuendG3SeP4dMURreOSKYJRQ,6339
+ keras_nightly-3.14.0.dev2026012904.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+ keras_nightly-3.14.0.dev2026012904.dist-info/top_level.txt,sha256=ptcw_-QuGZ4ZDjMdwi_Z0clZm8QAqFdvzzFnDEOTs9o,6
+ keras_nightly-3.14.0.dev2026012904.dist-info/RECORD,,