keras-nightly 3.14.0.dev2026011404__py3-none-any.whl → 3.14.0.dev2026011504__py3-none-any.whl

This diff shows the changes between two publicly released versions of this package, as they appear in their public registry. It is provided for informational purposes only.
keras/src/quantizers/awq_core.py ADDED
@@ -0,0 +1,217 @@
+"""AWQ core functionality for layer-wise quantization.
+
+This module provides the orchestration logic for applying AWQ quantization
+to transformer models in a layer-by-layer fashion.
+"""
+
+from contextlib import contextmanager
+
+from absl import logging
+
+from keras.src import ops
+from keras.src import utils as keras_utils
+from keras.src.dtype_policies.dtype_policy import AWQDTypePolicy
+from keras.src.dtype_policies.dtype_policy_map import DTypePolicyMap
+from keras.src.quantizers.awq import AWQ
+from keras.src.quantizers.awq_config import AWQConfig
+from keras.src.quantizers.gptq_core import find_layers_in_block
+from keras.src.quantizers.gptq_core import get_dataloader
+from keras.src.quantizers.utils import should_quantize_layer
+
+
+@contextmanager
+def stream_activations(layers_map, awq_objects):
+    """Context manager to capture activations for AWQ calibration.
+
+    Temporarily patches layer.call methods to capture activation statistics
+    for computing per-channel scaling factors.
+
+    Args:
+        layers_map: Dict[str, Layer]. Mapping from layer names to layers.
+        awq_objects: Dict[str, AWQ]. Mapping from names to AWQ instances.
+
+    Yields:
+        None: The patched state is active only within the `with` block.
+    """
+    original_calls = {}
+
+    def create_hook(name, original_call_func):
+        def hook(*args, **kwargs):
+            inp = args[0] if args else kwargs["inputs"]
+            num_features = awq_objects[name].rows
+            input_2d = ops.reshape(inp, (-1, num_features))
+            awq_objects[name].update_activation_magnitudes(input_2d)
+            return original_call_func(*args, **kwargs)
+
+        return hook
+
+    try:
+        for name, layer in layers_map.items():
+            original_calls[name] = layer.call
+            layer.call = create_hook(name, layer.call)
+        yield
+    finally:
+        for name, layer in layers_map.items():
+            layer.call = original_calls[name]
+
+
+def apply_awq_layerwise(dataloader, config, structure, filters=None):
+    """Apply AWQ quantization layer-by-layer to a Keras model.
+
+    This function processes the model sequentially, one block at a time:
+    1. Captures activation statistics through calibration data forward pass
+    2. Uses activation magnitudes to determine weight saliency
+    3. Finds optimal per-channel scales via grid search
+    4. Quantizes weights with AWQ scaling
+
+    Args:
+        dataloader: Calibration data as numpy array.
+        config: AWQConfig instance.
+        structure: Dict with 'pre_block_layers' and 'sequential_blocks'.
+        filters: Optional layer filters.
+    """
+    num_samples = config.num_samples
+    logging.info("Starting AWQ quantization...")
+
+    pre_layers = structure.get("pre_block_layers", [])
+    transformer_blocks = structure.get("sequential_blocks", [])
+
+    if not transformer_blocks:
+        raise ValueError(
+            "No sequential blocks found in the structure to quantize."
+        )
+
+    # Process inputs through pre-block layers (e.g., embedding)
+    inputs = []
+    for batch in dataloader:
+        batch = ops.convert_to_tensor(batch, dtype="int32")
+        for layer in pre_layers:
+            batch = layer(batch)
+        inputs.append(batch)
+
+    num_samples = min(num_samples, len(inputs))
+    progbar = keras_utils.Progbar(target=len(transformer_blocks))
+
+    for block_idx, block in enumerate(transformer_blocks):
+        logging.info(f"Quantizing Block {block_idx}")
+        sub_layers_map = find_layers_in_block(block)
+
+        # Apply filters
+        final_sub_layers_map = {}
+        for name, layer in sub_layers_map.items():
+            if not should_quantize_layer(layer, filters):
+                continue
+            final_sub_layers_map[name] = layer
+
+        sub_layers_map = final_sub_layers_map
+
+        if not sub_layers_map:
+            logging.info(
+                f" No quantizable layers found in block {block_idx}. Skipping."
+            )
+        else:
+            logging.info(f"Found layers: {list(sub_layers_map.keys())}")
+
+            # Create AWQ objects for each layer
+            awq_objects = {
+                name: AWQ(layer, config)
+                for name, layer in sub_layers_map.items()
+            }
+
+            # Capture activation statistics
+            with stream_activations(sub_layers_map, awq_objects):
+                for sample_idx in range(num_samples):
+                    current_input = inputs[sample_idx]
+                    if len(current_input.shape) == 2:
+                        current_input = ops.expand_dims(current_input, axis=0)
+                    _ = block(current_input)
+
+            # Quantize each layer
+            for name, awq_object in awq_objects.items():
+                logging.info(f"Quantizing {name}...")
+                awq_object.quantize_layer()
+                awq_object.free()
+
+            del awq_objects
+
+        # Generate inputs for next block
+        if block_idx < len(transformer_blocks) - 1:
+            logging.info(f"Generating inputs for block {block_idx + 1}...")
+            next_block_inputs = []
+            for sample_idx in range(num_samples):
+                current_input = inputs[sample_idx]
+                if len(current_input.shape) == 2:
+                    current_input = ops.expand_dims(current_input, axis=0)
+                output = block(current_input)[0]
+                next_block_inputs.append(output)
+            inputs = next_block_inputs
+
+        progbar.update(current=block_idx + 1)
+
+    logging.info("AWQ quantization complete.")
+
+
+def awq_quantize(config, quantization_layer_structure, filters=None):
+    """Main entry point for AWQ quantization.
+
+    Args:
+        config: AWQConfig instance.
+        quantization_layer_structure: Model structure dictionary.
+        filters: Optional layer filters.
+    """
+    if config.dataset is None or config.tokenizer is None:
+        raise ValueError(
+            "AWQ quantization requires a dataset and tokenizer. "
+            "Please provide them in the AWQConfig."
+        )
+
+    if quantization_layer_structure is None:
+        raise ValueError(
+            "For 'awq' mode, a valid quantization structure must be provided "
+            "either via `config.quantization_layer_structure` or by overriding "
+            "`model.get_quantization_layer_structure(mode)`. The structure "
+            "should be a dictionary with keys 'pre_block_layers' and "
+            "'sequential_blocks'."
+        )
+
+    # Load calibration data
+    dataloader = get_dataloader(
+        config.tokenizer,
+        config.sequence_length,
+        config.dataset,
+        num_samples=config.num_samples,
+    )
+
+    apply_awq_layerwise(
+        dataloader[: config.num_samples],
+        config,
+        quantization_layer_structure,
+        filters=filters,
+    )
+
+
+def get_group_size_for_layer(layer, config):
+    """Get group size from config or dtype policy.
+
+    Args:
+        layer: The layer to get group size for.
+        config: Optional AWQConfig instance.
+
+    Returns:
+        int: The group size for quantization.
+
+    Raises:
+        ValueError: If group size cannot be determined.
+    """
+    if config and isinstance(config, AWQConfig):
+        return config.group_size
+    elif isinstance(layer.dtype_policy, AWQDTypePolicy):
+        return layer.dtype_policy.group_size
+    elif isinstance(layer.dtype_policy, DTypePolicyMap):
+        policy = layer.dtype_policy[layer.path]
+        if isinstance(policy, AWQDTypePolicy):
+            return policy.group_size
+    raise ValueError(
+        "For AWQ quantization, group_size must be specified "
+        "through AWQConfig or AWQDTypePolicy."
+    )
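
For context, `apply_awq_layerwise` consumes a structure dictionary that splits the model into an embedding stage and a stack of transformer blocks. A minimal sketch of what a caller might assemble (the attribute names `embedding` and `transformer_layers` are hypothetical, not a Keras API guarantee):

    # Hypothetical wiring of a model into the structure dict used above.
    structure = {
        # layers run once to turn token ids into block inputs
        "pre_block_layers": [model.embedding],
        # blocks calibrated and quantized one at a time
        "sequential_blocks": list(model.transformer_layers),
    }
    awq_quantize(config, structure)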
keras/src/quantizers/gptq.py CHANGED
@@ -1,5 +1,4 @@
 import types
-from functools import partial
 
 from keras.src import ops
 from keras.src import quantizers
@@ -466,7 +465,7 @@ class GPTQ:
             group_size=self.config.group_size,
             activation_order=self.config.activation_order,
             order_metric=ops.diagonal(hessian_matrix),
-            compute_scale_zero=partial(self.quantizer.find_params, weight=True),
+            compute_scale_zero=self.quantizer.find_params,
         )
         quantized = ops.cast(
             quantized, self.original_layer.quantized_kernel.dtype
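
The `functools.partial` import is dropped because `find_params` loses its `weight` keyword (see the quantizers.py hunks below), so the bound method itself is a valid `compute_scale_zero` callback. A toy illustration with a stand-in class:

    class Quantizer:
        def find_params(self, tensor):  # new, weight-free signature
            return tensor

    q = Quantizer()
    compute_scale_zero = q.find_params  # no functools.partial wrapper needed
    assert compute_scale_zero(42) == 42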
keras/src/quantizers/gptq_core.py CHANGED
@@ -131,7 +131,7 @@ def get_dataloader(
     pieces = []
     if isinstance(dataset_list[0], str):
         for i, s in enumerate(dataset_list):
-            toks = np.asarray(tokenizer.tokenize(s)).reshape(-1)
+            toks = ops.convert_to_numpy(tokenizer.tokenize(s)).reshape(-1)
             pieces.append(toks)
             # avoid windows that span document boundaries
             if eos_id is not None and i < len(dataset_list) - 1:
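
`ops.convert_to_numpy` is the more robust choice here: a tokenizer may hand back a backend-native tensor (a torch tensor on GPU, for example), which `np.asarray` cannot always coerce directly. A small sketch of the behavior:

    import numpy as np
    from keras import ops

    # Backend tensors are moved to host memory, then viewed as NumPy.
    toks = ops.convert_to_numpy(ops.convert_to_tensor([1, 2, 3])).reshape(-1)
    assert isinstance(toks, np.ndarray)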
keras/src/quantizers/quantization_config.py CHANGED
@@ -182,6 +182,11 @@ def validate_and_resolve_config(mode, config):
             "For GPTQ, you must pass a `GPTQConfig` object in the "
             "`config` argument."
         )
+    elif mode == "awq":
+        raise ValueError(
+            "For AWQ, you must pass an `AWQConfig` object in the "
+            "`config` argument."
+        )
     else:
         if mode is not None:
             raise ValueError(
@@ -220,6 +225,15 @@ def validate_and_resolve_config(mode, config):
                 f"`GPTQConfig`. Received: {type(config)}"
             )
 
+    if mode == "awq":
+        from keras.src.quantizers.awq_config import AWQConfig
+
+        if not isinstance(config, AWQConfig):
+            raise ValueError(
+                "Mode 'awq' requires a valid `config` argument of type "
+                f"`AWQConfig`. Received: {type(config)}"
+            )
+
     return config
 
 
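Together, the two hunks make `mode="awq"` mirror the GPTQ checks: a missing config fails fast, and a config of the wrong type is rejected. A hedged sketch of the resulting contract (the import path is inferred from the quantization_config.py entry in the RECORD below):

    from keras.src.quantizers.quantization_config import (
        validate_and_resolve_config,
    )

    try:
        validate_and_resolve_config("awq", None)  # mode given, config missing
    except ValueError as err:
        print(err)  # For AWQ, you must pass an `AWQConfig` object ...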
keras/src/quantizers/quantizers.py CHANGED
@@ -653,11 +653,14 @@ def unpack_int4(packed, orig_len, axis=0, dtype="int8"):
     )
 
     def to_signed(x):
-        """Converts unpacked nibbles [0, 15] to signed int4 [-8, 7]."""
+        """Converts unpacked nibbles [0, 15] to signed int4 [-8, 7].
+
+        Uses a branchless XOR approach: (x ^ 8) - 8
+        This maps: 0->0, 1->1, ..., 7->7, 8->-8, 9->-7, ..., 15->-1
+        """
         dtype_x = backend.standardize_dtype(x.dtype)
         eight = ops.cast(8, dtype_x)
-        sixteen = ops.cast(16, dtype_x)
-        return ops.where(x < eight, x, x - sixteen)
+        return ops.subtract(ops.bitwise_xor(x, eight), eight)
 
     rank = getattr(packed.shape, "rank", None) or len(packed.shape)
     if axis < 0:
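
The replacement is branchless, so it avoids a select over two tensors. The mapping `(x ^ 8) - 8` can be verified exhaustively with plain integers:

    # XOR with 8 flips the nibble's sign bit: 0..7 survive the subtraction
    # unchanged, while 8..15 wrap to -8..-1.
    for x in range(16):
        assert (x ^ 8) - 8 == (x if x < 8 else x - 16)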
@@ -748,7 +751,7 @@ class GPTQQuantizer(Quantizer):
         self.zero = None
         self.maxq = None
 
-    def find_params(self, input_tensor, weight=True):
+    def find_params(self, input_tensor):
         """Finds quantization parameters (scale and zero) for a given tensor."""
         self.scale, self.zero, self.maxq = compute_quantization_parameters(
             input_tensor,
@@ -756,7 +759,6 @@ class GPTQQuantizer(Quantizer):
             symmetric=self.symmetric,
             per_channel=self.per_channel,
             group_size=self.group_size,
-            weight=weight,
             compute_dtype=self.compute_dtype,
         )
         return self.scale, self.zero, self.maxq
@@ -793,98 +795,105 @@ def compute_quantization_parameters(
     symmetric=False,
     per_channel=False,
     group_size=-1,
-    weight=False,
     compute_dtype="float32",
 ):
     """
-    Computes the scale and zero-point for quantization.
+    Computes the scale and zero-point for quantizing weight tensors.
 
     This function calculates the scale and zero-point required for quantizing
-    a given tensor `x` based on the specified parameters. It supports grouped,
-    per-channel, per-tensor, symmetric, and asymmetric quantization - along
-    with any combinations of these.
+    a given weight tensor `x` based on the specified parameters. It supports
+    grouped, per-channel, per-tensor, symmetric, and asymmetric quantization.
+
+    For grouped quantization (per_channel=True, group_size > 0), the output
+    shapes are [out_features, n_groups] where n_groups is the number of groups
+    along the in_features dimension.
 
     Args:
-        x: KerasTensor. The input tensor to quantize.
+        x: KerasTensor. The weight tensor to quantize with shape
+            [out_features, in_features].
         bits: int. The number of bits to quantize to (e.g., 4).
         symmetric: bool. Whether to use symmetric quantization.
        per_channel: bool. Whether to quantize per channel.
-        group_size: int. The group size for quantization.
-        weight: bool. Whether the input tensor is a weight tensor.
+        group_size: int. The group size for quantization. -1 means no grouping.
+        compute_dtype: str. The dtype for computation. Defaults to "float32".
 
     Returns:
         scale: KerasTensor. The scale tensor for quantization.
         zero: KerasTensor. The zero tensor for quantization.
        maxq: scalar. The maximum quantization value.
     """
+    # Input validation
     if x is None:
         raise ValueError(f"Input tensor {x} cannot be None.")
-
-    # For weights, we typically expect at least a 2D tensor.
-    if weight and len(x.shape) < 2:
+    if len(x.shape) < 2:
         raise ValueError(
             f"Input weight tensor {x} must have a rank of at "
             f"least 2, but got rank {len(x.shape)}."
         )
-
     if ops.size(x) == 0:
         raise ValueError("Input tensor 'x' cannot be empty.")
 
-    original_shape = x.shape
-
-    if per_channel:
-        if weight:
-            if group_size != -1:
-                input_reshaped = ops.reshape(x, [-1, group_size])
-            else:
-                input_reshaped = ops.reshape(x, [original_shape[0], -1])
-    else:  # per-tensor
-        input_reshaped = ops.reshape(x, [1, -1])
+    out_features, in_features = x.shape[0], x.shape[1]
 
-    # Find min/max values
-    min_values = ops.min(input_reshaped, axis=1)
-    max_values = ops.max(input_reshaped, axis=1)
+    # Determine number of groups for quantization
+    if per_channel and group_size > 0:
+        n_groups = (in_features + group_size - 1) // group_size
+    else:
+        n_groups = 1
+
+    # Compute min/max values based on quantization mode
+    if n_groups > 1:
+        # Grouped quantization: output shape [out_features, n_groups]
+        remainder = in_features % group_size
+        if remainder != 0:
+            pad_size = group_size - remainder
+            x = ops.pad(x, [[0, 0], [0, pad_size]], constant_values=0.0)
+
+        x_grouped = ops.reshape(x, [out_features, n_groups, group_size])
+        min_values = ops.min(x_grouped, axis=2)
+        max_values = ops.max(x_grouped, axis=2)
+    else:
+        # Per-channel or per-tensor: compute stats along rows
+        reduction_shape = [out_features, -1] if per_channel else [1, -1]
+        x_reshaped = ops.reshape(x, reduction_shape)
+        min_values = ops.min(x_reshaped, axis=1)
+        max_values = ops.max(x_reshaped, axis=1)
 
-    # Apply symmetric quantization logic if enabled
+    # Symmetric quantization: make range symmetric around zero
     if symmetric:
-        max_values = ops.maximum(ops.abs(min_values), max_values)
+        max_abs = ops.maximum(ops.abs(min_values), max_values)
         min_values = ops.where(
-            ops.less(min_values, 0), ops.negative(max_values), min_values
+            ops.less(min_values, 0), ops.negative(max_abs), min_values
         )
+        max_values = max_abs
 
-    # Ensure range is not zero to avoid division errors
+    # Ensure non-zero range to avoid division errors
     zero_range = ops.equal(min_values, max_values)
     min_values = ops.where(zero_range, ops.subtract(min_values, 1), min_values)
     max_values = ops.where(zero_range, ops.add(max_values, 1), max_values)
 
+    # Compute scale and zero-point
     maxq = ops.cast(ops.subtract(ops.power(2, bits), 1), compute_dtype)
-
-    # Calculate scale and zero-point
     scale = ops.divide(ops.subtract(max_values, min_values), maxq)
+    scale = ops.where(ops.less_equal(scale, 0), 1e-8, scale)
+
     if symmetric:
         zero = ops.full_like(scale, ops.divide(ops.add(maxq, 1), 2))
     else:
         zero = ops.round(ops.divide(ops.negative(min_values), scale))
 
-    # Ensure scale is non-zero
-    scale = ops.where(ops.less_equal(scale, 0), 1e-8, scale)
-
-    if weight:
-        # Per-channel, non-grouped case: simple reshape is correct.
-        if per_channel and group_size == -1:
-            scale = ops.reshape(scale, [-1, 1])
-            zero = ops.reshape(zero, [-1, 1])
-        elif not per_channel:
-            num_rows = original_shape[0]
-            scale = ops.tile(ops.reshape(scale, (1, 1)), (num_rows, 1))
-            zero = ops.tile(ops.reshape(zero, (1, 1)), (num_rows, 1))
-    if per_channel:
+    # Reshape output to [out_features, n_groups] or [out_features, 1]
+    if n_groups > 1:
+        pass  # Already [out_features, n_groups]
+    elif per_channel:
         scale = ops.reshape(scale, [-1, 1])
         zero = ops.reshape(zero, [-1, 1])
+    else:
+        # Per-tensor: tile single value to [out_features, 1]
+        scale = ops.tile(ops.reshape(scale, (1, 1)), (out_features, 1))
+        zero = ops.tile(ops.reshape(zero, (1, 1)), (out_features, 1))
 
-    zero = ops.cast(zero, "uint8")
-
-    return scale, zero, maxq
+    return scale, ops.cast(zero, "uint8"), maxq
 
 
 def quantize_with_zero_point(input_tensor, scale, zero, maxq):
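
A shape-level sketch of the grouped path above: with `per_channel=True` and `group_size=4` on a `[4, 10]` weight, ceil division yields three groups per row, the last group is zero-padded from two to four columns, and the per-group statistics come out as `[out_features, n_groups]`:

    import numpy as np
    from keras import ops

    x = np.arange(40, dtype="float32").reshape(4, 10)
    group_size = 4
    n_groups = (x.shape[1] + group_size - 1) // group_size  # ceil(10/4) = 3
    pad = n_groups * group_size - x.shape[1]  # 2 zero columns
    x_grouped = ops.reshape(
        ops.pad(x, [[0, 0], [0, pad]]), [4, n_groups, group_size]
    )
    print(ops.max(x_grouped, axis=2).shape)  # (4, 3)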
keras/src/version.py CHANGED
@@ -1,7 +1,7 @@
 from keras.src.api_export import keras_export
 
 # Unique source of truth for the version number.
-__version__ = "3.14.0.dev2026011404"
+__version__ = "3.14.0.dev2026011504"
 
 
 @keras_export("keras.version")
keras_nightly-3.14.0.dev2026011504.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: keras-nightly
-Version: 3.14.0.dev2026011404
+Version: 3.14.0.dev2026011504
 Summary: Multi-backend Keras
 Author-email: Keras team <keras-users@googlegroups.com>
 License: Apache License 2.0
keras_nightly-3.14.0.dev2026011504.dist-info/RECORD CHANGED
@@ -35,7 +35,7 @@ keras/_tf_keras/keras/datasets/mnist/__init__.py,sha256=tCUxwxWlcOGsTQzgysuC2kVv
 keras/_tf_keras/keras/datasets/reuters/__init__.py,sha256=aY43YfZVbCRMIRNfycxdKPDjq8BW8MZSA_fYQjonuY0,330
 keras/_tf_keras/keras/distillation/__init__.py,sha256=7pnduBNlDNCQFxm8yhGeYw8jKjOhK2l-F3iOjIFCmNk,497
 keras/_tf_keras/keras/distribution/__init__.py,sha256=sPbRKFpWd8mZypqWAId06KJzznVpkT_4Ae5QTcBJxa8,1063
-keras/_tf_keras/keras/dtype_policies/__init__.py,sha256=6Qd7KpNMMtqO5FV__5Vte12ig0fK0_j3jKCF6nQUarI,877
+keras/_tf_keras/keras/dtype_policies/__init__.py,sha256=c8mnzGQbn3b5EQ6v6zBnrkgs9EFcrPRi5Zv4TEVcolg,969
 keras/_tf_keras/keras/export/__init__.py,sha256=Qtde9Kh4AUm-pBmKL4L90ooJxo5EFVEW8i7LYxA_mVQ,194
 keras/_tf_keras/keras/initializers/__init__.py,sha256=Bg5r2XRraWXldFSlWNu1kNnp0g1sQt9vfcT8fvmXaeg,3371
 keras/_tf_keras/keras/layers/__init__.py,sha256=w0Av9SmIfUOFvKiDXV5t7IWfpoagNwLX2hgg3C-pTSs,16237
@@ -57,7 +57,7 @@ keras/_tf_keras/keras/preprocessing/__init__.py,sha256=8gjf16CnMiBJFp2E05iRJaHze
 keras/_tf_keras/keras/preprocessing/image/__init__.py,sha256=MEyK0JU7piXc1ql8ZTtHJuC907Q6DV5uOQqpivKKTn4,1656
 keras/_tf_keras/keras/preprocessing/sequence/__init__.py,sha256=TymwLKMEwqR6JhVFDhU80Hf8GVMMwg2vD6-pJqh5NuA,479
 keras/_tf_keras/keras/preprocessing/text/__init__.py,sha256=g3ej5_e86BY1AhlQwjalIQq_xgCMmCcDMtsh27diUNw,543
-keras/_tf_keras/keras/quantizers/__init__.py,sha256=S5jh1Gir4TVjUwFcpnQZta9gH6hxkuLlsGmVRPItRVs,1585
+keras/_tf_keras/keras/quantizers/__init__.py,sha256=kDf-LP5ola_TxzeClJTqSS42k4mzF-BcP5zh7Xh7miE,1652
 keras/_tf_keras/keras/random/__init__.py,sha256=qDZQXrw0oYVNc2KTmcmcgon61lQJBOXqF-6PMInBvec,763
 keras/_tf_keras/keras/regularizers/__init__.py,sha256=542Shphw7W8h4Dyf2rmqMKUECVZ8IVBvN9g1LWhz-b4,923
 keras/_tf_keras/keras/saving/__init__.py,sha256=KvL2GZxjvgFgEhvEnkvqjIR9JSNHKz-NWZacXajsjLI,1298
@@ -101,7 +101,7 @@ keras/datasets/mnist/__init__.py,sha256=tCUxwxWlcOGsTQzgysuC2kVvX01zkGOa9ABEb1Ek
 keras/datasets/reuters/__init__.py,sha256=aY43YfZVbCRMIRNfycxdKPDjq8BW8MZSA_fYQjonuY0,330
 keras/distillation/__init__.py,sha256=7pnduBNlDNCQFxm8yhGeYw8jKjOhK2l-F3iOjIFCmNk,497
 keras/distribution/__init__.py,sha256=sPbRKFpWd8mZypqWAId06KJzznVpkT_4Ae5QTcBJxa8,1063
-keras/dtype_policies/__init__.py,sha256=6Qd7KpNMMtqO5FV__5Vte12ig0fK0_j3jKCF6nQUarI,877
+keras/dtype_policies/__init__.py,sha256=c8mnzGQbn3b5EQ6v6zBnrkgs9EFcrPRi5Zv4TEVcolg,969
 keras/export/__init__.py,sha256=Qtde9Kh4AUm-pBmKL4L90ooJxo5EFVEW8i7LYxA_mVQ,194
 keras/initializers/__init__.py,sha256=Bg5r2XRraWXldFSlWNu1kNnp0g1sQt9vfcT8fvmXaeg,3371
 keras/layers/__init__.py,sha256=mteqKdCYQq1NWH-XOYoy1B2uJFmJkG2XPHJIOnUjLOg,16069
@@ -122,13 +122,13 @@ keras/optimizers/schedules/__init__.py,sha256=pQF3rQiAPuUSTUdflTr-fpL77oyGIv9xzG
 keras/preprocessing/__init__.py,sha256=N-_Rr6pYag2H_kEn6imVuol_hNL3NL65YL_zvdOV1mU,580
 keras/preprocessing/image/__init__.py,sha256=AmkgEp_-MvtIefySVEXv1IQ5_LyojjBfnIlRcUvNc40,451
 keras/preprocessing/sequence/__init__.py,sha256=zTMj_m6LWipe_hVq6SjE4JPj7eYKuUOZyh45g756cFg,196
-keras/quantizers/__init__.py,sha256=S5jh1Gir4TVjUwFcpnQZta9gH6hxkuLlsGmVRPItRVs,1585
+keras/quantizers/__init__.py,sha256=kDf-LP5ola_TxzeClJTqSS42k4mzF-BcP5zh7Xh7miE,1652
 keras/random/__init__.py,sha256=qDZQXrw0oYVNc2KTmcmcgon61lQJBOXqF-6PMInBvec,763
 keras/regularizers/__init__.py,sha256=542Shphw7W8h4Dyf2rmqMKUECVZ8IVBvN9g1LWhz-b4,923
 keras/saving/__init__.py,sha256=KvL2GZxjvgFgEhvEnkvqjIR9JSNHKz-NWZacXajsjLI,1298
 keras/src/__init__.py,sha256=Gi4S7EiCMkE03PbdGNpFdaUYySWDs_FcAJ8Taz9Y1BE,684
 keras/src/api_export.py,sha256=gXOkBOnmscV013WAc75lc4Up01-Kkg9EylIAT_QWctg,1173
-keras/src/version.py,sha256=w8F4qXHXwmNxkt7YJdj-CSjcFul2E8ddmb8Thwa2k5c,204
+keras/src/version.py,sha256=9qMnmtF-qZTXfhu_aYy3T9wAf18rbTFfpRqEiny8QSU,204
 keras/src/activations/__init__.py,sha256=0nL3IFDB9unlrMz8ninKOWo-uCHasTUpTo1tXZb2u44,4433
 keras/src/activations/activations.py,sha256=mogPggtp4CGldI3VOPNmesRxp6EbiR1_i4KLGaVwzL8,17614
 keras/src/applications/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -198,7 +198,7 @@ keras/src/backend/openvino/layer.py,sha256=5RdvaH1yOyPAphjKiuQAK1H_yZFYKE1Hp7c5b
 keras/src/backend/openvino/linalg.py,sha256=L6a4MFGND2wWzPVCh44cwuOgkcC4wJTo8Xy3HwW04lg,1614
 keras/src/backend/openvino/math.py,sha256=qw9kX2sJ2qr0dBJF12Ey0E2GcwixPUqoev6UcNra4NI,3944
 keras/src/backend/openvino/nn.py,sha256=zULPxdwVO7JDZUUtsuoEEPCLQ09ew8z8T6G_i_NEqrM,23741
-keras/src/backend/openvino/numpy.py,sha256=iRzcXqc8Aq_sLh5TgFiLCtgHLfotvI-S29KGwocIN68,103924
+keras/src/backend/openvino/numpy.py,sha256=sSPcjNoUxP9qsIEOf3m37I8rqV0MrGPtPEijeQpp0yk,105535
 keras/src/backend/openvino/random.py,sha256=4hRUtIP6qJxO3Qy9uH1x6jSuJna3nWPdUf4x2QU8-ew,5575
 keras/src/backend/openvino/rnn.py,sha256=ErmuZLPSgG9qU-NfYPPvBZ6Ysy8k-fA4g19Vhqq7OVQ,866
 keras/src/backend/openvino/trainer.py,sha256=bMmtSALqydqdS6ke-5sYW5fgxZDshDH810p_C0xCRTg,9087
@@ -278,8 +278,8 @@ keras/src/distillation/distillation_loss.py,sha256=3hhDKWhNHoLMa-EaBbJfa0flS6QoU
 keras/src/distillation/distiller.py,sha256=GI_yJ5RTgdXPEZoQwNe4Ds04UXP7obB0tJTqmUbTOa4,22984
 keras/src/distribution/__init__.py,sha256=pseLHx387oTmXROr95tU7kNWjPL8-JB4kZs8nUHsOiU,718
 keras/src/distribution/distribution_lib.py,sha256=zZbKxdL0sHJXSJNCd02qG3sVrq1F3x_JPota9Jlk6iM,34542
-keras/src/dtype_policies/__init__.py,sha256=BxFBq8xa6Cb-LndCZhIACX1ubym7Ky8tUQ8rxIVdeKg,3632
-keras/src/dtype_policies/dtype_policy.py,sha256=0gE6tGKUDRcrNCi-4_UU2sMFO_zMc8QGd3gzPBI-Rfs,16005
+keras/src/dtype_policies/__init__.py,sha256=8Ju8ICwTDdBfBrSoL6kmMzrcgMwMg6dPa1UPMDZKTqc,3717
+keras/src/dtype_policies/dtype_policy.py,sha256=3e2J589g0NUALz0LycSb3anh47g5LWIgn0jKR26B-Cs,19143
 keras/src/dtype_policies/dtype_policy_map.py,sha256=DqDYlssUGSiTqawPpaVRvR6ljYD8DJrFERCxXVVFvBE,10840
 keras/src/export/__init__.py,sha256=wQfjXEPN1YO2n0gz-7Eme0y_vq86s3SEWkZgs534sns,366
 keras/src/export/export_utils.py,sha256=DpfA5yI37gaMjyESxGTlf7aQ8FhYp0u8LQKxyKiFaoU,5585
@@ -295,7 +295,7 @@ keras/src/initializers/initializer.py,sha256=kNAyRA8CzBdtknT6ZUt5XIO2_Z9NzpN119C
 keras/src/initializers/random_initializers.py,sha256=AuUeQ3YZGakDKTCs8njQLhozE6iWYHwP6-VstnEMOaQ,23631
 keras/src/layers/__init__.py,sha256=s7jrOesk0YMUKCxe5BTdQ5cxqrnkYbA-GWRoCXuqpsg,12103
 keras/src/layers/input_spec.py,sha256=cjBUBmgdneJfhvbI-WLqSapJInCsxliWBygyfMWgkj4,10010
-keras/src/layers/layer.py,sha256=KOgSWAzj4fe3_ohIKPFStMw3r20ED9vzqYTq_e-SeLw,79436
+keras/src/layers/layer.py,sha256=Nbs9ke8ecAhTffiHyZ2cJUIt-3yaJb5fcjXKJAnOCHE,79634
 keras/src/layers/activations/__init__.py,sha256=MhPBye8WWLSf_iDel3BuuqYk4nx6Sym8s4dZKb1KTqQ,272
 keras/src/layers/activations/activation.py,sha256=c_Q5gUjCTD70a9-I1m5eEPcrWPpE-5iAlkDMt4lxRgA,1287
 keras/src/layers/activations/elu.py,sha256=jtszCDe6Cs_L3jITK3ascKouqgYUxdbGvT60kxQbcHM,840
@@ -324,9 +324,9 @@ keras/src/layers/convolutional/depthwise_conv2d.py,sha256=rnCd_S3UVeNdVotjKW1Wlo
 keras/src/layers/convolutional/separable_conv1d.py,sha256=vL5qzdaSOOTgyn1A6y9IZZbQOEeB6FedPk9JJI5wqSY,6452
 keras/src/layers/convolutional/separable_conv2d.py,sha256=ZkLOnA6l5UV3GuJufwlOHMOm1S-xkt6sdF-qmP4PDjw,6533
 keras/src/layers/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-keras/src/layers/core/dense.py,sha256=pCNxqIhUFZpAGicg7ls8fN2FunM-RP02CwSsCO22MfA,36378
-keras/src/layers/core/einsum_dense.py,sha256=tOvrYyRWSGh1JxIxpPgCFRydqdIht82WUznM9BEqe08,64507
-keras/src/layers/core/embedding.py,sha256=V-ZiIsL0uMpuv9nBqF4PwilU0ajsjnuN3XbBjy29kFM,22639
+keras/src/layers/core/dense.py,sha256=oe0vpcYAMeulF39TrQAeSm-PrNafrpqj6GPlOFQ-eq4,40477
+keras/src/layers/core/einsum_dense.py,sha256=XM58eGQjLSSPIbOdzFmcehJ12eBSGPdRnZ04Qjzwvqs,69554
+keras/src/layers/core/embedding.py,sha256=88x_dkaDlGD-3WtGJf58Us68q2ub8SY36ln9Cz3G75Q,22646
 keras/src/layers/core/identity.py,sha256=o0gLHlXL7eNJEbXIgIsgBsZX97K6jN9n3qPXprkXQ9Y,848
 keras/src/layers/core/input_layer.py,sha256=RQn1KHtUd6fPBPL9rs46X8KHmr1eGo7moLg8U5tlVl0,8168
 keras/src/layers/core/lambda_layer.py,sha256=Wplek4hOwh_rwXz4_bpz0pXzKe26ywz52glh5uD0l4w,9272
@@ -492,7 +492,7 @@ keras/src/metrics/regression_metrics.py,sha256=eLacV_8CKtzA26BJDJuncUDATuL1x8O6S
 keras/src/models/__init__.py,sha256=DPbBPSfIGgsufTfJH5U5xJOeN_Ef4FMadT7KKYg3Kjg,143
 keras/src/models/cloning.py,sha256=P0gMH3H9nyz6SMsdt4BQO05rXFa4qiqZk44rFpEnHsM,15945
 keras/src/models/functional.py,sha256=uD-qH9WwAUhaBrAEWAKnsVvKo0tvdHxa1M0dbBOE96Y,34086
-keras/src/models/model.py,sha256=9kM6rbiAZOx3ycq2qM7QV6h2P1di57rA2HlljstSkh8,42215
+keras/src/models/model.py,sha256=szseM7sjfNkdOGytF25nijhjERBu_66WPSYaJ719VBY,42408
 keras/src/models/sequential.py,sha256=CC9Q1BNB9m7TkgMHRyjOzhQvneng576wJpmdgHrACKY,14352
 keras/src/models/variable_mapping.py,sha256=FVtcgjBRqOxtvkzOE6kjG9SpcB9keDg2gS5LOTlXvG0,2181
 keras/src/ops/__init__.py,sha256=aORlvnrqY_eQl0EFLWdpHsXHnQ6JLSw1qhwJMr-VXJ0,644
@@ -504,7 +504,7 @@ keras/src/ops/linalg.py,sha256=3V8S_cgNxZZCIFcFj-FBHTdRqWNbimDtumMvfoc0f30,26736
 keras/src/ops/math.py,sha256=4qYMJ5qAPmeSyeF63YWoGbUkQt6f4_VX0enOChU4mXU,37233
 keras/src/ops/nn.py,sha256=04gjHB2BWusy4tWm59EO5Ns1paJC5umDNGwNCKzaJWQ,104658
 keras/src/ops/node.py,sha256=aJgn9D-GkteE--Bbt2cZ9JjVxb2W2uS1OWEKoeLsl3Y,5583
-keras/src/ops/numpy.py,sha256=H-OdMMkMsbVTfha0zceIMGQN8Nr2S5iNAT0oKwLjB10,256861
+keras/src/ops/numpy.py,sha256=6oaYtC0HDCPAKjn3lG4Yd9SjYKL8nRySC0IA9aXsKbg,257233
 keras/src/ops/operation.py,sha256=A7sh9Hi6kZb7wkeMmhrDQIq770ofANXuP-Qg-kwCM3o,15485
 keras/src/ops/operation_utils.py,sha256=C6eThl-haKzlDH0fC1rn5-P1P-pCfIfXs-fy-ADR534,14523
 keras/src/ops/symbolic_arguments.py,sha256=MKwXxZYkyouD9BPmQ1uUNxILdcwPvTayAqXaUV3P3o4,1628
@@ -527,12 +527,15 @@ keras/src/optimizers/rmsprop.py,sha256=DCbmmViUnYCHMCO9YCtC2wGzPXxNPBJhkpwAmROOz
 keras/src/optimizers/sgd.py,sha256=_3xanWOI0s2dISxEVT7i_tehsWakQQz2y480Iwkonas,4396
 keras/src/optimizers/schedules/__init__.py,sha256=vuUuHNTev8sD2-swsuq7zqyYbmaOhDyiIE6F3dGGSZU,546
 keras/src/optimizers/schedules/learning_rate_schedule.py,sha256=WI5QuaWFsEFJhRfLy0KCmkxKwGBMnmgMLYsWC_4YbCo,35828
-keras/src/quantizers/__init__.py,sha256=P5AJBRuL-4K75JGeONRb0WxQpJxEA6dCGdQgst_GBoc,2390
-keras/src/quantizers/gptq.py,sha256=fKrmT_pxpTCMXhNt34m9K4NWGdUXX-1SUX6I4buE2Vg,20175
+keras/src/quantizers/__init__.py,sha256=3LlZ1Z5G5mYVdrZ2xnoFgW28OFneYc_Ys2dzuJ3S0nk,2459
+keras/src/quantizers/awq.py,sha256=i7loWty9LEzfP04_FAyrRbKEXShkoQeScLNSuxRVKu8,13334
+keras/src/quantizers/awq_config.py,sha256=jPD8-SRmWn_uHd1YtUEfI6V8fxmylOF8btUJMerVaEs,5701
+keras/src/quantizers/awq_core.py,sha256=AJgbySMvSwENM1u-e08qb00mX5ub3egUrs677UdYKeQ,7640
+keras/src/quantizers/gptq.py,sha256=ph6e-mzjxD0gGn98KiDS48muiScnfFvLnKFCbo1Ik7o,20123
 keras/src/quantizers/gptq_config.py,sha256=zpPWsbfAdYZArhk_alSnFK-nBj92bdJZBzkSM1MKl5g,8925
-keras/src/quantizers/gptq_core.py,sha256=u-kb58KfmcYg4-TNFPqvBJ9z8a7Yqgu6Qz87tvsOEgI,16935
-keras/src/quantizers/quantization_config.py,sha256=9bxhF9EwG9Q49rpMiHh8PLwMF0Gxc5JyNCNKPwLx5uQ,7631
-keras/src/quantizers/quantizers.py,sha256=BDD3vi_15lmOY_ybI7oQDgINYlM9CF0QSQuP6kzjXH4,35964
+keras/src/quantizers/gptq_core.py,sha256=EKhdTXZQ1uo45KYJcO0h9bMTbVZH4pmqs4irQy9r47o,16945
+keras/src/quantizers/quantization_config.py,sha256=8jGV1lzLC-gk37U4By2ol0QJ9T3LNuMynet40xWlxjg,8117
+keras/src/quantizers/quantizers.py,sha256=QzImbGjVaa6pISxhOJPwq7seQC4s_EToI5JHKp0gbkk,36698
 keras/src/quantizers/utils.py,sha256=i6e5MobXrQeKA6zFenjzUNoDDWRGL9bcfgdbE_-0IeM,672
 keras/src/random/__init__.py,sha256=BmXVYPzxbhADohoLtAEEzB3cesP7YBFDsp1qc6BWWlg,420
 keras/src/random/random.py,sha256=bUADZIVDuCghwIWTk0qBxXTxUdiNGWIdsRi8QJ3ePg4,17581
@@ -615,7 +618,7 @@ keras/utils/bounding_boxes/__init__.py,sha256=jtvQll4u8ZY0Z96HwNhP1nxWEG9FM3gI-6
 keras/utils/legacy/__init__.py,sha256=oSYZz6uS8UxSElRaaJYWJEoweJ4GAasZjnn7fNaOlog,342
 keras/visualization/__init__.py,sha256=UKWmiy6sps4SWlmQi9WX8_Z53cPpLlphz2zIeHdwJpQ,722
 keras/wrappers/__init__.py,sha256=QkS-O5K8qGS7C3sytF8MpmO6PasATpNVGF8qtb7Ojsw,407
-keras_nightly-3.14.0.dev2026011404.dist-info/METADATA,sha256=kQdBx6UVd9ve6T2k1-rLnycFlcoNFSJWmzXOerCw39g,6339
-keras_nightly-3.14.0.dev2026011404.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-keras_nightly-3.14.0.dev2026011404.dist-info/top_level.txt,sha256=ptcw_-QuGZ4ZDjMdwi_Z0clZm8QAqFdvzzFnDEOTs9o,6
-keras_nightly-3.14.0.dev2026011404.dist-info/RECORD,,
+keras_nightly-3.14.0.dev2026011504.dist-info/METADATA,sha256=6s-lhD6ZQgn-dl1KiHGJMLNttJ4ir79glEAKZAMn6fI,6339
+keras_nightly-3.14.0.dev2026011504.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+keras_nightly-3.14.0.dev2026011504.dist-info/top_level.txt,sha256=ptcw_-QuGZ4ZDjMdwi_Z0clZm8QAqFdvzzFnDEOTs9o,6
+keras_nightly-3.14.0.dev2026011504.dist-info/RECORD,,