keras-nightly 3.14.0.dev2026013104__py3-none-any.whl → 3.14.0.dev2026020304__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- keras/_tf_keras/keras/ops/__init__.py +1 -0
- keras/_tf_keras/keras/ops/numpy/__init__.py +1 -0
- keras/ops/__init__.py +1 -0
- keras/ops/numpy/__init__.py +1 -0
- keras/src/backend/jax/numpy.py +20 -6
- keras/src/backend/numpy/numpy.py +13 -0
- keras/src/backend/openvino/numpy.py +20 -6
- keras/src/backend/tensorflow/numpy.py +11 -3
- keras/src/backend/torch/numpy.py +19 -0
- keras/src/export/onnx.py +106 -27
- keras/src/ops/image.py +4 -0
- keras/src/ops/numpy.py +66 -0
- keras/src/testing/__init__.py +1 -0
- keras/src/testing/test_case.py +4 -7
- keras/src/version.py +1 -1
- {keras_nightly-3.14.0.dev2026013104.dist-info → keras_nightly-3.14.0.dev2026020304.dist-info}/METADATA +1 -1
- {keras_nightly-3.14.0.dev2026013104.dist-info → keras_nightly-3.14.0.dev2026020304.dist-info}/RECORD +19 -19
- {keras_nightly-3.14.0.dev2026013104.dist-info → keras_nightly-3.14.0.dev2026020304.dist-info}/WHEEL +0 -0
- {keras_nightly-3.14.0.dev2026013104.dist-info → keras_nightly-3.14.0.dev2026020304.dist-info}/top_level.txt +0 -0
keras/_tf_keras/keras/ops/__init__.py
CHANGED

@@ -248,6 +248,7 @@ from keras.src.ops.numpy import nan_to_num as nan_to_num
 from keras.src.ops.numpy import nanmax as nanmax
 from keras.src.ops.numpy import nanmean as nanmean
 from keras.src.ops.numpy import nanmin as nanmin
+from keras.src.ops.numpy import nanprod as nanprod
 from keras.src.ops.numpy import nansum as nansum
 from keras.src.ops.numpy import ndim as ndim
 from keras.src.ops.numpy import negative as negative

keras/_tf_keras/keras/ops/numpy/__init__.py
CHANGED

@@ -132,6 +132,7 @@ from keras.src.ops.numpy import nan_to_num as nan_to_num
 from keras.src.ops.numpy import nanmax as nanmax
 from keras.src.ops.numpy import nanmean as nanmean
 from keras.src.ops.numpy import nanmin as nanmin
+from keras.src.ops.numpy import nanprod as nanprod
 from keras.src.ops.numpy import nansum as nansum
 from keras.src.ops.numpy import ndim as ndim
 from keras.src.ops.numpy import negative as negative

keras/ops/__init__.py
CHANGED
@@ -248,6 +248,7 @@ from keras.src.ops.numpy import nan_to_num as nan_to_num
 from keras.src.ops.numpy import nanmax as nanmax
 from keras.src.ops.numpy import nanmean as nanmean
 from keras.src.ops.numpy import nanmin as nanmin
+from keras.src.ops.numpy import nanprod as nanprod
 from keras.src.ops.numpy import nansum as nansum
 from keras.src.ops.numpy import ndim as ndim
 from keras.src.ops.numpy import negative as negative

keras/ops/numpy/__init__.py
CHANGED
@@ -132,6 +132,7 @@ from keras.src.ops.numpy import nan_to_num as nan_to_num
 from keras.src.ops.numpy import nanmax as nanmax
 from keras.src.ops.numpy import nanmean as nanmean
 from keras.src.ops.numpy import nanmin as nanmin
+from keras.src.ops.numpy import nanprod as nanprod
 from keras.src.ops.numpy import nansum as nansum
 from keras.src.ops.numpy import ndim as ndim
 from keras.src.ops.numpy import negative as negative

keras/src/backend/jax/numpy.py
CHANGED
@@ -1,6 +1,7 @@
 import builtins
 import math
 
+import jax
 import jax.experimental.sparse as jax_sparse
 import jax.numpy as jnp
 from jax import export as jax_export
@@ -16,6 +17,18 @@ from keras.src.backend.jax.core import cast
 from keras.src.backend.jax.core import convert_to_tensor
 
 
+def _uses_cpu(x):
+    if hasattr(x, "device"):
+        device = x.device
+        if not isinstance(device, jax.Device):
+            # Array is sharded.
+            return False
+        return device.platform == "cpu"
+    else:
+        # This is a Tracer, not a concrete Array.
+        return jax.default_backend() == "cpu"
+
+
 def rot90(array, k=1, axes=(0, 1)):
     """Rotate an array by 90 degrees in the specified plane."""
     if array.ndim < 2:
@@ -402,11 +415,9 @@ def arctanh(x):
 
 
 def argmax(x, axis=None, keepdims=False):
-    from keras.src.testing.test_case import uses_cpu
-
     x = convert_to_tensor(x)
     dtype = standardize_dtype(x.dtype)
-    if "float" not in dtype or x.ndim == 0 or not uses_cpu():
+    if "float" not in dtype or x.ndim == 0 or not _uses_cpu(x):
         return jnp.argmax(x, axis=axis, keepdims=keepdims)
 
     # Fix the flush-to-zero (FTZ) issue based on this issue:
@@ -419,11 +430,9 @@ def argmax(x, axis=None, keepdims=False):
 
 
 def argmin(x, axis=None, keepdims=False):
-    from keras.src.testing.test_case import uses_cpu
-
     x = convert_to_tensor(x)
     dtype = standardize_dtype(x.dtype)
-    if "float" not in dtype or x.ndim == 0 or not uses_cpu():
+    if "float" not in dtype or x.ndim == 0 or not _uses_cpu(x):
         return jnp.argmin(x, axis=axis, keepdims=keepdims)
 
     # Fix the flush-to-zero (FTZ) issue based on this issue:
@@ -1028,6 +1037,11 @@ def nanmin(x, axis=None, keepdims=False):
     return jnp.nanmin(x, axis=axis, keepdims=keepdims)
 
 
+def nanprod(x, axis=None, keepdims=False):
+    x = convert_to_tensor(x)
+    return jnp.nanprod(x, axis=axis, keepdims=keepdims)
+
+
 def nansum(x, axis=None, keepdims=False):
     x = convert_to_tensor(x)
     return jnp.nansum(x, axis=axis, keepdims=keepdims)

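The new `_uses_cpu` helper replaces the old import from `keras.src.testing.test_case`: instead of asking the distribution layer whether any CPU device exists, it inspects the array actually passed to `argmax`/`argmin`. A minimal sketch of the two branches it distinguishes, assuming a CPU-only host with JAX installed; the printed values would differ on GPU/TPU hosts:

```python
import jax
import jax.numpy as jnp

x = jnp.arange(3.0)

# Concrete, unsharded arrays expose a single jax.Device via `.device`.
print(isinstance(x.device, jax.Device))  # True
print(x.device.platform)                 # "cpu" on a CPU-only host

# Sharded arrays return a Sharding object instead of a jax.Device, which is
# why the helper treats "not a jax.Device" as "not plain CPU".
# Traced values inside jit have no concrete device, so the helper falls back
# to the process-wide default backend:
print(jax.default_backend())             # "cpu" on a CPU-only host
```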
keras/src/backend/numpy/numpy.py
CHANGED
@@ -973,6 +973,19 @@ def nanmin(x, axis=None, keepdims=False):
     return np.nanmin(x, axis=axis, keepdims=keepdims)
 
 
+def nanprod(x, axis=None, keepdims=False):
+    axis = standardize_axis_for_numpy(axis)
+
+    x = convert_to_tensor(x)
+
+    dtype = dtypes.result_type(x.dtype)
+    if dtype in ("bool", "int8", "int16"):
+        dtype = "int32"
+    elif dtype in ("uint8", "uint16"):
+        dtype = "uint32"
+    return np.nanprod(x, axis=axis, keepdims=keepdims, dtype=dtype)
+
+
 def nansum(x, axis=None, keepdims=False):
     axis = standardize_axis_for_numpy(axis)
     dtype = standardize_dtype(x.dtype)

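The NumPy backend accumulates bool and small integer inputs in 32-bit, mirroring the promotion used by the other reductions. A quick standalone check of that behavior (plain NumPy, no Keras required; the input values are arbitrary):

```python
import numpy as np

x = np.array([[2, 3], [4, 5]], dtype="int8")
# Accumulating in int32 avoids the overflow an int8 product would hit.
print(np.nanprod(x, dtype="int32"))        # 120
print(np.nanprod(x, dtype="int32").dtype)  # int32
```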
keras/src/backend/openvino/numpy.py
CHANGED

@@ -366,6 +366,10 @@ def arctan2(x1, x2):
     x1 = ov_opset.convert(x1, result_type)
     x2 = ov_opset.convert(x2, result_type)
 
+    nan_x1 = ov_opset.is_nan(x1)
+    nan_x2 = ov_opset.is_nan(x2)
+    nan_mask = ov_opset.logical_or(nan_x1, nan_x2)
+
     x = ov_opset.divide(x1, x2)
     y = ov_opset.atan(x)
@@ -375,12 +379,12 @@ def arctan2(x1, x2):
     neg_half_pi = ov_opset.constant(-float(np.pi / 2), ov_type)
     zero_const = ov_opset.constant(0.0, ov_type)
 
-    cond_x2_gt0 = ov_opset.greater(x2, zero_const)
-    cond_x2_lt0 = ov_opset.less(x2, zero_const)
+    cond_x2_gt0 = ov_opset.greater(x2, zero_const)
+    cond_x2_lt0 = ov_opset.less(x2, zero_const)
 
-    cond_x1_ge0 = ov_opset.greater_equal(x1, zero_const)
-    cond_x1_gt0 = ov_opset.greater(x1, zero_const)
-    cond_x1_eq0 = ov_opset.equal(x1, zero_const)
+    cond_x1_ge0 = ov_opset.greater_equal(x1, zero_const)
+    cond_x1_gt0 = ov_opset.greater(x1, zero_const)
+    cond_x1_eq0 = ov_opset.equal(x1, zero_const)
 
     out_x2_lt0 = ov_opset.select(
         cond_x1_ge0,
@@ -393,7 +397,11 @@ def arctan2(x1, x2):
 
     out_not_pos = ov_opset.select(cond_x2_lt0, out_x2_lt0, out_x2_zero)
 
-    final_out = ov_opset.select(cond_x2_gt0, y, out_not_pos)
+    value_out = ov_opset.select(cond_x2_gt0, y, out_not_pos)
+
+    # Generate NaN safely for all floating dtypes (including bf16)
+    nan_value = ov_opset.divide(zero_const, zero_const)
+    final_out = ov_opset.select(nan_mask, nan_value, value_out)
     return OpenVINOKerasTensor(final_out.output(0))
 
 
@@ -2121,6 +2129,12 @@ def nanmin(x, axis=None, keepdims=False):
     raise NotImplementedError("`nanmin` is not supported with openvino backend")
 
 
+def nanprod(x, axis=None, keepdims=False):
+    raise NotImplementedError(
+        "`nanprod` is not supported with openvino backend"
+    )
+
+
 def nansum(x, axis=None, keepdims=False):
     x = get_ov_output(x)
     x_type = x.get_element_type()

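The `arctan2` change exists because the select-based branching above would otherwise return a finite angle when either input is NaN; the new `nan_mask` restores the usual propagation. For reference, the NumPy semantics the mask is meant to reproduce:

```python
import numpy as np

# NaN in either argument must propagate to the result.
print(np.arctan2(np.nan, 1.0))  # nan
print(np.arctan2(1.0, np.nan))  # nan
print(np.arctan2(1.0, -1.0))    # 2.356194490192345 (3*pi/4)
```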
keras/src/backend/tensorflow/numpy.py
CHANGED

@@ -949,11 +949,9 @@ def argmax(x, axis=None, keepdims=False):
 
 
 def argmin(x, axis=None, keepdims=False):
-    from keras.src.testing.test_case import uses_cpu
-
     x = convert_to_tensor(x)
     dtype = standardize_dtype(x.dtype)
-    if "float" not in dtype or x.ndim == 0 or not uses_cpu():
+    if "float" not in dtype or x.ndim == 0:
         _x = x
         if axis is None:
             x = tf.reshape(x, [-1])
@@ -2186,6 +2184,16 @@ def nanmin(x, axis=None, keepdims=False):
     )
 
 
+def nanprod(x, axis=None, keepdims=False):
+    x = convert_to_tensor(x)
+
+    if not x.dtype.is_floating:
+        return prod(x, axis=axis, keepdims=keepdims)
+
+    x_safe = tf.where(tf.math.is_nan(x), tf.ones((), dtype=x.dtype), x)
+    return prod(x_safe, axis=axis, keepdims=keepdims)
+
+
 def nansum(x, axis=None, keepdims=False):
     x = convert_to_tensor(x)
     dtype = standardize_dtype(x.dtype)

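The backends without a native `nanprod` kernel all rely on the same identity: replacing NaN with 1 leaves a product unchanged. A minimal standalone sketch of the TensorFlow masking step, assuming TensorFlow is installed (the backend itself routes through its own `prod` helper rather than `tf.reduce_prod`):

```python
import tensorflow as tf

x = tf.constant([[1.0, float("nan")], [3.0, 2.0]])
# NaN -> 1 is a no-op for multiplication, so the product simply skips NaNs.
x_safe = tf.where(tf.math.is_nan(x), tf.ones((), dtype=x.dtype), x)
print(tf.reduce_prod(x_safe, axis=1).numpy())  # [1. 6.]
```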
keras/src/backend/torch/numpy.py
CHANGED
@@ -1318,6 +1318,25 @@ def nanmin(x, axis=None, keepdims=False):
     )
 
 
+def nanprod(x, axis=None, keepdims=False):
+    x = convert_to_tensor(x)
+
+    if axis == () or axis == []:
+        return torch.nan_to_num(x, nan=1)
+
+    if isinstance(axis, (list, tuple)):
+        axis = sorted(axis, reverse=True)
+
+    if not torch.is_floating_point(x):
+        return prod(x, axis=axis, keepdims=keepdims)
+
+    return prod(
+        torch.where(torch.isnan(x), torch.ones((), dtype=x.dtype), x),
+        axis=axis,
+        keepdims=keepdims,
+    )
+
+
 def nansum(x, axis=None, keepdims=False):
     if isinstance(x, (list, tuple)):
         x = stack(x)

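The torch version adds two wrinkles: an empty `axis` means "reduce nothing", handled by simply replacing NaNs with 1, and multi-axis reductions are applied from the highest axis down so earlier reductions do not shift the remaining indices. A small check of both paths, assuming PyTorch is installed:

```python
import torch

x = torch.tensor([1.0, float("nan"), 3.0])
# axis=() reduces nothing, so the result is elementwise with NaN treated as 1,
# which is exactly what torch.nan_to_num(x, nan=1) produces.
print(torch.nan_to_num(x, nan=1))  # tensor([1., 1., 3.])

# The general path masks NaNs before taking the product.
masked = torch.where(torch.isnan(x), torch.ones((), dtype=x.dtype), x)
print(torch.prod(masked))          # tensor(3.)
```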
keras/src/export/onnx.py
CHANGED
@@ -76,13 +76,19 @@ def export_onnx(
     if input_signature is None:
         input_signature = get_input_signature(model)
         if not input_signature or not model._called:
-            raise ValueError(
-                "The model provided has never called. "
-                "It must be called at least once before export."
-            )
+            raise ValueError("The model provided has never called. ")
+
+    # Extract specs for proper input name generation
+    if len(input_signature) == 1 and isinstance(input_signature[0], list):
+        # Multi-input case: input_signature = [[spec1, spec2, ...]]
+        specs_for_names = input_signature[0]
+    else:
+        # Single input case: input_signature = [spec]
+        specs_for_names = input_signature
+
     input_names = [
         getattr(spec, "name", None) or f"input_{i}"
-        for i, spec in enumerate(input_signature)
+        for i, spec in enumerate(specs_for_names)
     ]
 
     if backend.backend() in ("tensorflow", "jax"):
@@ -105,10 +111,37 @@ def export_onnx(
     elif backend.backend() == "torch":
         import torch
 
+        """Generate dynamic_axes format for ONNX export."""
+        dynamic_axes = {}
+
+        for input_idx, spec in enumerate(specs_for_names):
+            if not hasattr(spec, "shape"):
+                continue
+
+            shape = spec.shape
+            dynamic_dims = {}
+
+            for dim_idx, dim_size in enumerate(shape):
+                if dim_size is None:
+                    if dim_idx == 0:
+                        dim_name = "batch"
+                    else:
+                        dim_name = f"dim_{input_idx}_{dim_idx}"
+                    dynamic_dims[dim_idx] = dim_name
+
+            if dynamic_dims:
+                input_name = (
+                    input_names[input_idx]
+                    if input_idx < len(input_names)
+                    else f"input_{input_idx}"
+                )
+                dynamic_axes[input_name] = dynamic_dims
+
         sample_inputs = tree.map_structure(
             lambda x: convert_spec_to_tensor(x, replace_none_number=1),
             input_signature,
         )
+
         sample_inputs = tuple(sample_inputs)
         # TODO: Make dict model exportable.
         if any(isinstance(x, dict) for x in sample_inputs):
@@ -140,34 +173,80 @@ def export_onnx(
         warnings.filterwarnings(
             "ignore", message=r".*suppressed about get_attr references.*"
         )
+        # Suppress TorchScript tracing warnings
+        warnings.filterwarnings(
+            "ignore",
+            message=r".*Converting a tensor to a Python boolean.*",
+            category=torch.jit.TracerWarning,
+        )
+        warnings.filterwarnings(
+            "ignore",
+            message=r".*Converting a tensor to a Python integer.*",
+            category=torch.jit.TracerWarning,
+        )
+        warnings.filterwarnings(
+            "ignore",
+            message=r".*Iterating over a tensor.*",
+            category=torch.jit.TracerWarning,
+        )
+        warnings.filterwarnings(
+            "ignore",
+            message=r".*Using len to get tensor shape.*",
+            category=torch.jit.TracerWarning,
+        )
+        warnings.filterwarnings(
+            "ignore",
+            message=r".*torch.tensor results are registered as constants.*",
+            category=torch.jit.TracerWarning,
+        )
+
+        # When dynamic shapes are present, prefer TorchScript over
+        # TorchDynamo because TorchDynamo has constraint inference issues
+        # with dynamic dimensions
+        if not dynamic_axes:
             try:
-                # Try the TorchDynamo-based ONNX exporter first
+                # Try the TorchDynamo-based ONNX exporter first for static
+                # shapes
+                export_kwargs = {
+                    "verbose": actual_verbose,
+                    "opset_version": opset_version,
+                    "input_names": input_names,
+                    "dynamo": True,
+                }
+
                 onnx_program = torch.onnx.export(
-                    model,
-                    sample_inputs,
-                    verbose=actual_verbose,
-                    opset_version=opset_version,
-                    input_names=input_names,
-                    dynamo=True,
+                    model, sample_inputs, **export_kwargs
                 )
                 if hasattr(onnx_program, "optimize"):
                     onnx_program.optimize()  # Only supported by torch>=2.6.0.
                 onnx_program.save(filepath)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+                return
+            except Exception:
+                pass
+
+        """Export using TorchScript-based ONNX exporter."""
+        # Set verbose to False for TorchScript due to file system leakage
+        torchscript_verbose = verbose
+        if verbose is None:
+            # Set to `False` due to file system leakage issue:
+            # https://github.com/keras-team/keras/issues/20826
+            torchscript_verbose = False
+
+        export_kwargs = {
+            "verbose": torchscript_verbose,
+            "opset_version": opset_version,
+            "input_names": input_names,
+            "export_params": True,
+            "do_constant_folding": True,
+            "dynamo": False,
+        }
+
+        # For TorchScript (dynamo=False), use dynamic_axes parameter
+        if dynamic_axes:
+            export_kwargs["dynamic_axes"] = dynamic_axes
+
+        torch.onnx.export(model, sample_inputs, filepath, **export_kwargs)
     else:
         raise NotImplementedError(
            "`export_onnx` is only compatible with TensorFlow, JAX and "

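Taken together, the torch branch now builds a `dynamic_axes` mapping from any `None` dimensions in the input specs, keeps the TorchDynamo exporter for fully static shapes, and falls back to the TorchScript exporter (with `dynamic_axes`) when dynamic dimensions are present. A hedged sketch of how that path is reached from the public API; the model, shapes, and output file name are illustrative only, and the torch backend with the ONNX extras installed is assumed:

```python
import numpy as np
import keras

# One dynamic (None) time dimension plus the implicit None batch dimension.
model = keras.Sequential(
    [keras.Input(shape=(None, 8)), keras.layers.Dense(4)]
)
model(np.zeros((2, 5, 8), dtype="float32"))  # the exporter requires a called model
model.export("model.onnx", format="onnx")    # illustrative output path
```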
keras/src/ops/image.py
CHANGED
@@ -342,6 +342,10 @@ def resize(
             "Expected `size` to be a tuple of 2 integers. "
             f"Received: size={size}"
         )
+    if size[0] <= 0 or size[1] <= 0:
+        raise ValueError(
+            f"`size` must have positive height and width. Received: size={size}"
+        )
     if len(images.shape) < 3 or len(images.shape) > 4:
         raise ValueError(
             "Invalid images rank: expected rank 3 (single image) "

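The new check rejects non-positive target sizes up front instead of letting them reach the backend. A quick illustration, with the array and sizes chosen arbitrarily:

```python
import numpy as np
from keras import ops

img = np.zeros((8, 8, 3), dtype="float32")
print(ops.image.resize(img, size=(4, 4)).shape)  # (4, 4, 3)

try:
    ops.image.resize(img, size=(0, 4))
except ValueError as err:
    print(err)  # `size` must have positive height and width. Received: size=(0, 4)
```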
keras/src/ops/numpy.py
CHANGED
@@ -5231,6 +5231,72 @@ def nanmin(x, axis=None, keepdims=False):
     return backend.numpy.nanmin(x, axis=axis, keepdims=keepdims)
 
 
+class Nanprod(Operation):
+    def __init__(self, axis=None, keepdims=False, *, name=None):
+        super().__init__(name=name)
+        self.axis = axis
+        self.keepdims = keepdims
+
+    def call(self, x):
+        return backend.numpy.nanprod(
+            x,
+            axis=self.axis,
+            keepdims=self.keepdims,
+        )
+
+    def compute_output_spec(self, x):
+        dtype = backend.standardize_dtype(x.dtype)
+
+        if dtype == "bool":
+            dtype = "int32"
+        elif dtype in ("int8", "int16"):
+            dtype = "int32"
+        elif dtype in ("uint8", "uint16"):
+            dtype = "uint32"
+
+        if backend.backend() == "torch" and dtype == "uint32":
+            dtype = "int32"
+
+        return KerasTensor(
+            reduce_shape(x.shape, axis=self.axis, keepdims=self.keepdims),
+            dtype=dtype,
+        )
+
+
+@keras_export(["keras.ops.nanprod", "keras.ops.numpy.nanprod"])
+def nanprod(x, axis=None, keepdims=False):
+    """Product of a tensor over the given axes, ignoring NaNs.
+
+    Args:
+        x: Input tensor.
+        axis: Axis or axes along which the product is computed. The default is
+            to compute the product of the flattened tensor.
+        keepdims: If this is set to `True`, the axes which are reduced are left
+            in the result as dimensions with size one.
+
+    Returns:
+        Output tensor containing the product, with NaN values ignored.
+
+    Examples:
+    >>> import numpy as np
+    >>> from keras import ops
+    >>> x = np.array([[1.0, np.nan, 3.0],
+    ...               [np.nan, 2.0, 1.0]])
+    >>> ops.nanprod(x)
+    6.0
+
+    >>> ops.nanprod(x, axis=1)
+    array([3., 2.])
+
+    >>> ops.nanprod(x, axis=1, keepdims=True)
+    array([[3.],
+           [2.]])
+    """
+    if any_symbolic_tensors((x,)):
+        return Nanprod(axis=axis, keepdims=keepdims).symbolic_call(x)
+    return backend.numpy.nanprod(x, axis=axis, keepdims=keepdims)
+
+
 class Nansum(Operation):
     def __init__(self, axis=None, keepdims=False, *, name=None):
         super().__init__(name=name)

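Besides the eager examples in the docstring, the `Nanprod` operation also defines the symbolic behavior: `compute_output_spec` reduces the shape and upcasts small integer dtypes, matching the NumPy backend. A small sketch of the symbolic path, assuming this nightly build is installed:

```python
import keras
from keras import ops

x = keras.KerasTensor(shape=(None, 3), dtype="int8")
y = ops.nanprod(x, axis=1)
print(y.shape, y.dtype)  # (None,) int32: shape reduced, dtype upcast
```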
keras/src/testing/__init__.py
CHANGED
@@ -1,5 +1,6 @@
 from keras.src.testing.test_case import TestCase
 from keras.src.testing.test_case import jax_uses_gpu
+from keras.src.testing.test_case import jax_uses_tpu
 from keras.src.testing.test_case import tensorflow_uses_gpu
 from keras.src.testing.test_case import torch_uses_gpu
 from keras.src.testing.test_case import uses_gpu

keras/src/testing/test_case.py
CHANGED
@@ -650,6 +650,10 @@ def uses_gpu():
     return False
 
 
+def jax_uses_tpu():
+    return backend.backend() == "jax" and uses_tpu()
+
+
 def uses_tpu():
     # Condition used to skip tests when using the TPU
     try:
@@ -661,13 +665,6 @@ def uses_tpu():
     return False
 
 
-def uses_cpu():
-    devices = distribution.list_devices()
-    if any(d.startswith("cpu") for d in devices):
-        return True
-    return False
-
-
 def create_keras_tensors(input_shape, dtype, sparse, ragged):
     if isinstance(input_shape, dict):
         return {

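`jax_uses_tpu` composes the existing `uses_tpu` check with a backend check, in the same style as `jax_uses_gpu`, while the removed `uses_cpu` helper now lives, per-array, in the JAX backend as `_uses_cpu`. A sketch of how such helpers are typically used to gate tests; the test name is illustrative:

```python
import pytest
from keras.src import testing

@pytest.mark.skipif(
    testing.jax_uses_tpu(), reason="Behavior differs on JAX TPU"
)
def test_some_numeric_edge_case():
    ...
```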
keras/src/version.py
CHANGED
{keras_nightly-3.14.0.dev2026013104.dist-info → keras_nightly-3.14.0.dev2026020304.dist-info}/RECORD
RENAMED
@@ -45,11 +45,11 @@ keras/_tf_keras/keras/losses/__init__.py,sha256=xBc_KOtSLwp3h3CKQ0EnCuIy-Bsak2SP
 keras/_tf_keras/keras/metrics/__init__.py,sha256=_wF31PTvua5ahF9JEW4Hx1UVNjVCLqVI8J5JNrZCBf8,6546
 keras/_tf_keras/keras/mixed_precision/__init__.py,sha256=AM51CzHqzcY75tqdpQiuVcTRUEpUzBqeb-EfLeSDSV8,727
 keras/_tf_keras/keras/models/__init__.py,sha256=83pyA0pzytqin8JLV6FEbPreCb-V64ToebxFGrHsVdQ,501
-keras/_tf_keras/keras/ops/__init__.py,sha256=
+keras/_tf_keras/keras/ops/__init__.py,sha256=VUR7wlaKfOzMloUgtfMVLNyNQvD1GxLNrJ0YIRC4754,15818
 keras/_tf_keras/keras/ops/image/__init__.py,sha256=oM_PLh5Jk9OGfi1bbJcfWkjoq0Ye5JQG9a7v_KzDfoc,1034
 keras/_tf_keras/keras/ops/linalg/__init__.py,sha256=0ab6icK3yuIm4khSfAksGRFLEAJhaOu6gGgarau4iEQ,822
 keras/_tf_keras/keras/ops/nn/__init__.py,sha256=2eD8IlkfBrsmJjHpzsxMM3_058oGeZVgohdBd27iDnI,2992
-keras/_tf_keras/keras/ops/numpy/__init__.py,sha256=
+keras/_tf_keras/keras/ops/numpy/__init__.py,sha256=V7sHwuFJ9dxD06bcckB6VTjstR-LurGes9JXyA9BhiY,9880
 keras/_tf_keras/keras/optimizers/__init__.py,sha256=1fx0vEB-oGu-9dumxoIvX4qVHdgJvf74OLyYoBkE2y0,1267
 keras/_tf_keras/keras/optimizers/legacy/__init__.py,sha256=uIMQESCV80Q0FY-9ikQUjXYPyZqmTfAM3dfohQ5DzYs,516
 keras/_tf_keras/keras/optimizers/schedules/__init__.py,sha256=pQF3rQiAPuUSTUdflTr-fpL77oyGIv9xzGdjae3M3kw,1120
@@ -111,11 +111,11 @@ keras/losses/__init__.py,sha256=VIXBHQFNdLUPZ7JuwtIKj_4E-xf2yvNyrmdklvjr_xM,3667
 keras/metrics/__init__.py,sha256=qeEwtqpSCAaCr8BMUv1eVaqJl2Zb83OB5K0BG3JB0nI,6245
 keras/mixed_precision/__init__.py,sha256=AM51CzHqzcY75tqdpQiuVcTRUEpUzBqeb-EfLeSDSV8,727
 keras/models/__init__.py,sha256=83pyA0pzytqin8JLV6FEbPreCb-V64ToebxFGrHsVdQ,501
-keras/ops/__init__.py,sha256=
+keras/ops/__init__.py,sha256=VUR7wlaKfOzMloUgtfMVLNyNQvD1GxLNrJ0YIRC4754,15818
 keras/ops/image/__init__.py,sha256=oM_PLh5Jk9OGfi1bbJcfWkjoq0Ye5JQG9a7v_KzDfoc,1034
 keras/ops/linalg/__init__.py,sha256=0ab6icK3yuIm4khSfAksGRFLEAJhaOu6gGgarau4iEQ,822
 keras/ops/nn/__init__.py,sha256=2eD8IlkfBrsmJjHpzsxMM3_058oGeZVgohdBd27iDnI,2992
-keras/ops/numpy/__init__.py,sha256=
+keras/ops/numpy/__init__.py,sha256=V7sHwuFJ9dxD06bcckB6VTjstR-LurGes9JXyA9BhiY,9880
 keras/optimizers/__init__.py,sha256=1fx0vEB-oGu-9dumxoIvX4qVHdgJvf74OLyYoBkE2y0,1267
 keras/optimizers/legacy/__init__.py,sha256=uIMQESCV80Q0FY-9ikQUjXYPyZqmTfAM3dfohQ5DzYs,516
 keras/optimizers/schedules/__init__.py,sha256=pQF3rQiAPuUSTUdflTr-fpL77oyGIv9xzGdjae3M3kw,1120
@@ -128,7 +128,7 @@ keras/regularizers/__init__.py,sha256=542Shphw7W8h4Dyf2rmqMKUECVZ8IVBvN9g1LWhz-b
 keras/saving/__init__.py,sha256=KvL2GZxjvgFgEhvEnkvqjIR9JSNHKz-NWZacXajsjLI,1298
 keras/src/__init__.py,sha256=Gi4S7EiCMkE03PbdGNpFdaUYySWDs_FcAJ8Taz9Y1BE,684
 keras/src/api_export.py,sha256=gXOkBOnmscV013WAc75lc4Up01-Kkg9EylIAT_QWctg,1173
-keras/src/version.py,sha256=
+keras/src/version.py,sha256=EgrYJtA7YKUOV4vpq9co7mADp2YlsvvhgZYUYS4YnbY,204
 keras/src/activations/__init__.py,sha256=0nL3IFDB9unlrMz8ninKOWo-uCHasTUpTo1tXZb2u44,4433
 keras/src/activations/activations.py,sha256=mogPggtp4CGldI3VOPNmesRxp6EbiR1_i4KLGaVwzL8,17614
 keras/src/applications/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -171,7 +171,7 @@ keras/src/backend/jax/layer.py,sha256=o6CicT06udwamTRQIjNSDLZLyYHFzBXNbxewXgWe0i
 keras/src/backend/jax/linalg.py,sha256=LDaLZYz49ChE2kJR3YpaM9xuwusvd3krV7nNAAazTWA,2642
 keras/src/backend/jax/math.py,sha256=1IEDpdoF8e5ltu3D4wbDQuihzvJHhMXz8W9Z_E-eJqU,9391
 keras/src/backend/jax/nn.py,sha256=mrRawNvf9EWe8rdTwK_Auz6xdLkVG6hH0nIAP7hyUDE,60271
-keras/src/backend/jax/numpy.py,sha256=
+keras/src/backend/jax/numpy.py,sha256=DBDcy3cSkNUsI4yQ_8bnP9U7bEzhyUhaMMd6r2tkCdY,39617
 keras/src/backend/jax/optimizer.py,sha256=5DeXQHcYmUI6F9i1m1VHn3sBt4LEStOeBXnKdESevLM,4134
 keras/src/backend/jax/random.py,sha256=Uk2huGIk_dlzMrx5eDVrrr2TeCEMitn2vr4yzA0NXjs,3594
 keras/src/backend/jax/rnn.py,sha256=Ycq0qfLY4M4jhltvztpLQyywjEM17T7CZQFh4hhHOUE,7767
@@ -186,7 +186,7 @@ keras/src/backend/numpy/layer.py,sha256=dTk7W7ql7vRgll7JbOXK5PlIhQw5VHdpSjKciHd8
 keras/src/backend/numpy/linalg.py,sha256=uzLTxEyuX_gDcnoA5Q59GdTg33py0WooKK5T6T9Td6c,2543
 keras/src/backend/numpy/math.py,sha256=HdkEA5ro7dtQBTP78GFIgqTFLgNQ49PXHhqI1vLRGfo,10169
 keras/src/backend/numpy/nn.py,sha256=P9JAnTlwSTI7bVv8WIv1pDQJHpjML_WJ0RsJWy-LJMc,46200
-keras/src/backend/numpy/numpy.py,sha256=
+keras/src/backend/numpy/numpy.py,sha256=FvJmqSUBhIsAujUpkGxz6SADHD3LEG5hsMKrNEj9u9g,38805
 keras/src/backend/numpy/random.py,sha256=wx2nE75q7L2cBMjtQlQx8yKMj4Ie3puFMDQsbrZO8SA,3961
 keras/src/backend/numpy/rnn.py,sha256=thOsMung1qR3lQsR4_D6hqKMFollQgrB0KwsJLk4BMY,7867
 keras/src/backend/numpy/trainer.py,sha256=MzWr8_LLHa1P6fxdUWirGw_lQwHGF_vkZ7RUGLUzjUs,11126
@@ -198,7 +198,7 @@ keras/src/backend/openvino/layer.py,sha256=5RdvaH1yOyPAphjKiuQAK1H_yZFYKE1Hp7c5b
 keras/src/backend/openvino/linalg.py,sha256=L6a4MFGND2wWzPVCh44cwuOgkcC4wJTo8Xy3HwW04lg,1614
 keras/src/backend/openvino/math.py,sha256=qw9kX2sJ2qr0dBJF12Ey0E2GcwixPUqoev6UcNra4NI,3944
 keras/src/backend/openvino/nn.py,sha256=zULPxdwVO7JDZUUtsuoEEPCLQ09ew8z8T6G_i_NEqrM,23741
-keras/src/backend/openvino/numpy.py,sha256=
+keras/src/backend/openvino/numpy.py,sha256=8FKiEmrGHEvhXhpE84j_ew87muFgcr9QMxNGNX5OUig,113533
 keras/src/backend/openvino/random.py,sha256=4hRUtIP6qJxO3Qy9uH1x6jSuJna3nWPdUf4x2QU8-ew,5575
 keras/src/backend/openvino/rnn.py,sha256=ErmuZLPSgG9qU-NfYPPvBZ6Ysy8k-fA4g19Vhqq7OVQ,866
 keras/src/backend/openvino/trainer.py,sha256=bMmtSALqydqdS6ke-5sYW5fgxZDshDH810p_C0xCRTg,9087
@@ -211,7 +211,7 @@ keras/src/backend/tensorflow/layer.py,sha256=69d40LwL4HhKRsCjj1VRpjfrQXXF8VV3vh0
 keras/src/backend/tensorflow/linalg.py,sha256=_lZVfdY1tFvrN7xwbt3INGoTR0yC5v-kI1Q0XppVibY,8773
 keras/src/backend/tensorflow/math.py,sha256=zTu_7Ff6B2Ro862z_xH0OCmIWbV74DjsO5UnfjYuOUQ,12370
 keras/src/backend/tensorflow/nn.py,sha256=6vtZHzUED6_blUPE1Tnc3GAxPpJ2ebxoaiMn80tTL9k,51328
-keras/src/backend/tensorflow/numpy.py,sha256=
+keras/src/backend/tensorflow/numpy.py,sha256=czmwm3J6I3UouRWpujDRDWo6Q80ZnHEJK8-jH6--uxw,107098
 keras/src/backend/tensorflow/optimizer.py,sha256=kFlyEOnGjEYdLpd8mpwhUeku78__xBfZbbrDWpJrq60,9307
 keras/src/backend/tensorflow/random.py,sha256=iO8V_soaDXZm9ewyAVbjudhsMj08C348c9Bz64nxXC4,6475
 keras/src/backend/tensorflow/rnn.py,sha256=JbOSpt48cm612c7YwiTYOQCQsNXyI_6QeRhtUn8qEvM,34829
@@ -227,7 +227,7 @@ keras/src/backend/torch/layer.py,sha256=htECdpv9ioHWM8_zqQkEdxgDsgLu8XJi5yXgnLl-
 keras/src/backend/torch/linalg.py,sha256=wgPCfnscp5HOBmX9_-m-57lzxs1ttLNzmHqj2VYYq7k,2108
 keras/src/backend/torch/math.py,sha256=g-ElDii2Y_o1-t6BAu2nbS7JH-aPqVS5Fqds8aYzIlg,14324
 keras/src/backend/torch/nn.py,sha256=80MdDzkN7wV3MJbNsGh9B8IkdBoXC36wQcV8_o13y-8,37688
-keras/src/backend/torch/numpy.py,sha256=
+keras/src/backend/torch/numpy.py,sha256=raM9hy7dS3EsHghiKVXWx4X3Cqwtr0h4d8QjqYubR6o,59821
 keras/src/backend/torch/random.py,sha256=YhLfC7qkGpzlU_i6gGPVormo3BMSo7OUA3TC3GCehrA,8292
 keras/src/backend/torch/rnn.py,sha256=MJIVbHKsUA2dZm4Gu2NvRxlrFCWeWSxSZRmFxSsC3Zg,26041
 keras/src/backend/torch/trainer.py,sha256=dcikz1c5O0FHNzRKSi6WhIHsHfLV2HDlrXPElSd1cgE,17985
@@ -284,7 +284,7 @@ keras/src/dtype_policies/dtype_policy_map.py,sha256=DqDYlssUGSiTqawPpaVRvR6ljYD8
 keras/src/export/__init__.py,sha256=wQfjXEPN1YO2n0gz-7Eme0y_vq86s3SEWkZgs534sns,366
 keras/src/export/export_utils.py,sha256=DpfA5yI37gaMjyESxGTlf7aQ8FhYp0u8LQKxyKiFaoU,5585
 keras/src/export/litert.py,sha256=zI9q1n1Qi1w_7vyBVMBrPNGRGcafmeOmwJKtVCAVYHc,8995
-keras/src/export/onnx.py,sha256=
+keras/src/export/onnx.py,sha256=OATNkvx6DqJLavEBwN_EY2JUZ8JmquwtXf9Sd4uuO6Y,11206
 keras/src/export/openvino.py,sha256=C9QNCOQ8-MwoOr8ZUqQvGzWY_CHOb8yDlMIJ9NYsLyw,7626
 keras/src/export/saved_model.py,sha256=bxcsVd87MXnw3ENKu_dbUc8JzPFqjOAPbLL0U5KqG-g,28425
 keras/src/export/tf2onnx_lib.py,sha256=cvHXS84Ocjcp1cTh5SziXAzNUsZ51RqjXNhOk5IlNDs,7234
@@ -499,12 +499,12 @@ keras/src/ops/__init__.py,sha256=aORlvnrqY_eQl0EFLWdpHsXHnQ6JLSw1qhwJMr-VXJ0,644
 keras/src/ops/core.py,sha256=1L74Jox7wY6R_DFBzVVS3VjLlIKbE0sxyK5x-pjzx8Q,42779
 keras/src/ops/einops.py,sha256=-pxW0_AzDQNsR7t2TJrzvYXBJpmLYA3fJoO0U_U96PY,6268
 keras/src/ops/function.py,sha256=QV9n1-xeTPDK_FJ3sjlHDWVH2sqDj96R6YQnJueMOlA,17821
-keras/src/ops/image.py,sha256=
+keras/src/ops/image.py,sha256=JtGvaPiqv04o8ZQJfOpZDXy8aXzPfn-MwnErfKcVHd4,67405
 keras/src/ops/linalg.py,sha256=3V8S_cgNxZZCIFcFj-FBHTdRqWNbimDtumMvfoc0f30,26736
 keras/src/ops/math.py,sha256=4qYMJ5qAPmeSyeF63YWoGbUkQt6f4_VX0enOChU4mXU,37233
 keras/src/ops/nn.py,sha256=04gjHB2BWusy4tWm59EO5Ns1paJC5umDNGwNCKzaJWQ,104658
 keras/src/ops/node.py,sha256=aJgn9D-GkteE--Bbt2cZ9JjVxb2W2uS1OWEKoeLsl3Y,5583
-keras/src/ops/numpy.py,sha256=
+keras/src/ops/numpy.py,sha256=y7rm9-2QT9m9xKWFYivJx7U8EoXDfNMEGK9hyZANBvI,266299
 keras/src/ops/operation.py,sha256=A7sh9Hi6kZb7wkeMmhrDQIq770ofANXuP-Qg-kwCM3o,15485
 keras/src/ops/operation_utils.py,sha256=C6eThl-haKzlDH0fC1rn5-P1P-pCfIfXs-fy-ADR534,14523
 keras/src/ops/symbolic_arguments.py,sha256=MKwXxZYkyouD9BPmQ1uUNxILdcwPvTayAqXaUV3P3o4,1628
@@ -550,8 +550,8 @@ keras/src/saving/orbax_util.py,sha256=ArJI9hQODUyyvzCiXt8AS3VH6E4SL0vF02-RHBk30g
 keras/src/saving/saving_api.py,sha256=X-zsTum57M3AWf_cYqhq_o5wgLcFxNh1ditoxbL_0LY,14694
 keras/src/saving/saving_lib.py,sha256=bRI8TeNOlflTfX3njSkkwNv-VYip-OW7ienIm0lL96I,58920
 keras/src/saving/serialization_lib.py,sha256=yzCTm8hin__MGA2N5M5F-8Zbts5ZJVmINbrH4wEtIwI,30334
-keras/src/testing/__init__.py,sha256=
-keras/src/testing/test_case.py,sha256=
+keras/src/testing/__init__.py,sha256=RQ5ZZ88NhcDTHCIpPulvaiTOTdJqAMH9ZhptXyMcqqY,368
+keras/src/testing/test_case.py,sha256=24aSbz5WG5ICUavaLWo-PWBD_O9T7feRGvrLoNS19Q8,31780
 keras/src/testing/test_utils.py,sha256=6Vb8tJIyjU1ay63w3jvXNNhh7sSNrosQll4ii1NXELQ,6197
 keras/src/trainers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras/src/trainers/compile_utils.py,sha256=k5FDn7we0RN9fhRslY_WOQZRFwfzjqpYmiDeOKkAKqk,31260
@@ -618,7 +618,7 @@ keras/utils/bounding_boxes/__init__.py,sha256=jtvQll4u8ZY0Z96HwNhP1nxWEG9FM3gI-6
 keras/utils/legacy/__init__.py,sha256=oSYZz6uS8UxSElRaaJYWJEoweJ4GAasZjnn7fNaOlog,342
 keras/visualization/__init__.py,sha256=UKWmiy6sps4SWlmQi9WX8_Z53cPpLlphz2zIeHdwJpQ,722
 keras/wrappers/__init__.py,sha256=QkS-O5K8qGS7C3sytF8MpmO6PasATpNVGF8qtb7Ojsw,407
-keras_nightly-3.14.0.
-keras_nightly-3.14.0.
-keras_nightly-3.14.0.
-keras_nightly-3.14.0.
+keras_nightly-3.14.0.dev2026020304.dist-info/METADATA,sha256=hy2sly87dVSoOiutBAyDJXlZX_b0UIWgCmSfphO4lwU,6339
+keras_nightly-3.14.0.dev2026020304.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+keras_nightly-3.14.0.dev2026020304.dist-info/top_level.txt,sha256=ptcw_-QuGZ4ZDjMdwi_Z0clZm8QAqFdvzzFnDEOTs9o,6
+keras_nightly-3.14.0.dev2026020304.dist-info/RECORD,,

{keras_nightly-3.14.0.dev2026013104.dist-info → keras_nightly-3.14.0.dev2026020304.dist-info}/WHEEL
RENAMED
File without changes