keras-nightly 3.14.0.dev2026020104__py3-none-any.whl → 3.14.0.dev2026020404__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- keras/_tf_keras/keras/ops/__init__.py +1 -0
- keras/_tf_keras/keras/ops/numpy/__init__.py +1 -0
- keras/ops/__init__.py +1 -0
- keras/ops/numpy/__init__.py +1 -0
- keras/src/backend/jax/numpy.py +5 -0
- keras/src/backend/numpy/numpy.py +13 -0
- keras/src/backend/openvino/numpy.py +78 -1
- keras/src/backend/tensorflow/numpy.py +10 -0
- keras/src/backend/torch/numpy.py +19 -0
- keras/src/export/onnx.py +106 -27
- keras/src/ops/numpy.py +66 -0
- keras/src/testing/__init__.py +1 -0
- keras/src/testing/test_case.py +48 -24
- keras/src/version.py +1 -1
- {keras_nightly-3.14.0.dev2026020104.dist-info → keras_nightly-3.14.0.dev2026020404.dist-info}/METADATA +1 -1
- {keras_nightly-3.14.0.dev2026020104.dist-info → keras_nightly-3.14.0.dev2026020404.dist-info}/RECORD +18 -18
- {keras_nightly-3.14.0.dev2026020104.dist-info → keras_nightly-3.14.0.dev2026020404.dist-info}/WHEEL +0 -0
- {keras_nightly-3.14.0.dev2026020104.dist-info → keras_nightly-3.14.0.dev2026020404.dist-info}/top_level.txt +0 -0
keras/_tf_keras/keras/ops/__init__.py CHANGED
@@ -248,6 +248,7 @@ from keras.src.ops.numpy import nan_to_num as nan_to_num
 from keras.src.ops.numpy import nanmax as nanmax
 from keras.src.ops.numpy import nanmean as nanmean
 from keras.src.ops.numpy import nanmin as nanmin
+from keras.src.ops.numpy import nanprod as nanprod
 from keras.src.ops.numpy import nansum as nansum
 from keras.src.ops.numpy import ndim as ndim
 from keras.src.ops.numpy import negative as negative
keras/_tf_keras/keras/ops/numpy/__init__.py CHANGED
@@ -132,6 +132,7 @@ from keras.src.ops.numpy import nan_to_num as nan_to_num
 from keras.src.ops.numpy import nanmax as nanmax
 from keras.src.ops.numpy import nanmean as nanmean
 from keras.src.ops.numpy import nanmin as nanmin
+from keras.src.ops.numpy import nanprod as nanprod
 from keras.src.ops.numpy import nansum as nansum
 from keras.src.ops.numpy import ndim as ndim
 from keras.src.ops.numpy import negative as negative
keras/ops/__init__.py CHANGED
@@ -248,6 +248,7 @@ from keras.src.ops.numpy import nan_to_num as nan_to_num
 from keras.src.ops.numpy import nanmax as nanmax
 from keras.src.ops.numpy import nanmean as nanmean
 from keras.src.ops.numpy import nanmin as nanmin
+from keras.src.ops.numpy import nanprod as nanprod
 from keras.src.ops.numpy import nansum as nansum
 from keras.src.ops.numpy import ndim as ndim
 from keras.src.ops.numpy import negative as negative
keras/ops/numpy/__init__.py CHANGED
@@ -132,6 +132,7 @@ from keras.src.ops.numpy import nan_to_num as nan_to_num
 from keras.src.ops.numpy import nanmax as nanmax
 from keras.src.ops.numpy import nanmean as nanmean
 from keras.src.ops.numpy import nanmin as nanmin
+from keras.src.ops.numpy import nanprod as nanprod
 from keras.src.ops.numpy import nansum as nansum
 from keras.src.ops.numpy import ndim as ndim
 from keras.src.ops.numpy import negative as negative
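Note: the re-exports above make the new op available from the public `keras.ops` and `keras.ops.numpy` namespaces. A minimal usage sketch, assuming a backend that implements `nanprod` (i.e. not OpenVINO):

    import numpy as np
    from keras import ops

    x = np.array([[1.0, np.nan, 3.0], [np.nan, 2.0, 1.0]])
    # NaNs are treated as the multiplicative identity, matching numpy.nanprod.
    print(ops.nanprod(x))          # -> 6.0
    print(ops.nanprod(x, axis=1))  # -> [3. 2.]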
keras/src/backend/jax/numpy.py CHANGED
@@ -1037,6 +1037,11 @@ def nanmin(x, axis=None, keepdims=False):
     return jnp.nanmin(x, axis=axis, keepdims=keepdims)
 
 
+def nanprod(x, axis=None, keepdims=False):
+    x = convert_to_tensor(x)
+    return jnp.nanprod(x, axis=axis, keepdims=keepdims)
+
+
 def nansum(x, axis=None, keepdims=False):
     x = convert_to_tensor(x)
     return jnp.nansum(x, axis=axis, keepdims=keepdims)
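Note: the JAX backend simply forwards to `jnp.nanprod`, so it inherits NumPy's convention that an all-NaN slice reduces to the empty product. An illustrative sketch, not part of the diff:

    import jax.numpy as jnp

    x = jnp.array([[jnp.nan, jnp.nan], [2.0, 3.0]])
    # NaNs count as 1, so the all-NaN row yields the empty product 1.0.
    print(jnp.nanprod(x, axis=1))  # -> [1. 6.]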
keras/src/backend/numpy/numpy.py CHANGED
@@ -973,6 +973,19 @@ def nanmin(x, axis=None, keepdims=False):
     return np.nanmin(x, axis=axis, keepdims=keepdims)
 
 
+def nanprod(x, axis=None, keepdims=False):
+    axis = standardize_axis_for_numpy(axis)
+
+    x = convert_to_tensor(x)
+
+    dtype = dtypes.result_type(x.dtype)
+    if dtype in ("bool", "int8", "int16"):
+        dtype = "int32"
+    elif dtype in ("uint8", "uint16"):
+        dtype = "uint32"
+    return np.nanprod(x, axis=axis, keepdims=keepdims, dtype=dtype)
+
+
 def nansum(x, axis=None, keepdims=False):
     axis = standardize_axis_for_numpy(axis)
     dtype = standardize_dtype(x.dtype)
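Note: the NumPy backend promotes bool/int8/int16 inputs to int32 (and uint8/uint16 to uint32) before reducing, which mirrors the Keras dtype-promotion rules for products. A small illustrative check with plain NumPy (not taken from the diff):

    import numpy as np

    x = np.array([100, 100, 2], dtype="int8")
    # The reduction is carried out in the promoted dtype, so the result is int32.
    out = np.nanprod(x, dtype="int32")
    print(out, out.dtype)  # -> 20000 int32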
keras/src/backend/openvino/numpy.py CHANGED
@@ -802,7 +802,78 @@ def count_nonzero(x, axis=None):
 
 
 def cross(x1, x2, axisa=-1, axisb=-1, axisc=-1, axis=None):
+    if axis is not None:
+        axisa = axisb = axisc = axis
+
+    x1 = get_ov_output(x1)
+    x2 = get_ov_output(x2)
+
+    x1, x2 = _align_operand_types(x1, x2, "cross()")
+
+    shape1 = x1.get_partial_shape()
+    shape2 = x2.get_partial_shape()
+
+    # Rank Normalization
+    rank1 = shape1.rank.get_length()
+    rank2 = shape2.rank.get_length()
+
+    axisa = canonicalize_axis(axisa, rank1)
+    axisb = canonicalize_axis(axisb, rank2)
+    axisc = canonicalize_axis(axisc, rank1 if rank1 > rank2 else rank2)
+
+    d1 = shape1[axisa].get_length()
+    d2 = shape2[axisb].get_length()
+
+    if d1 not in (2, 3) or d2 not in (2, 3):
+        raise ValueError(
+            "Dimension of vectors for cross product must be 2 or 3. "
+            f"Got dimensions {d1} and {d2} for inputs x1 and x2."
+        )
+
+    # Pad to 3D by adding a zero component.
+    def pad_to_3d(x, dim, ax):
+        if dim == 3:
+            return x
+
+        # Create a slice of zeros with the same type as x
+        slice0 = ov_opset.gather(
+            x,
+            ov_opset.constant([0], Type.i32),
+            ov_opset.constant(ax, Type.i32),
+        )
+        zeros = ov_opset.multiply(
+            slice0,
+            ov_opset.constant(0, x.get_element_type()),
+        )
+
+        return ov_opset.concat([x, zeros], ax)
+
+    x1_3d = pad_to_3d(x1, d1, axisa)
+    x2_3d = pad_to_3d(x2, d2, axisb)
+
+    # Split Vectors
+    u = ov_opset.split(x1_3d, ov_opset.constant(axisa, Type.i32), 3).outputs()
+    v = ov_opset.split(x2_3d, ov_opset.constant(axisb, Type.i32), 3).outputs()
+
+    # u x v = (u2*v3 - u3*v2, u3*v1 - u1*v3, u1*v2 - u2*v1)
+    res_x = ov_opset.subtract(
+        ov_opset.multiply(u[1], v[2]), ov_opset.multiply(u[2], v[1])
+    )
+    res_y = ov_opset.subtract(
+        ov_opset.multiply(u[2], v[0]), ov_opset.multiply(u[0], v[2])
+    )
+    res_z = ov_opset.subtract(
+        ov_opset.multiply(u[0], v[1]), ov_opset.multiply(u[1], v[0])
+    )
+
+    # If dim was 2D, we remove the padded zero component.
+    if d1 == 2 and d2 == 2:
+        result = res_z
+        result = ov_opset.squeeze(result, ov_opset.constant([axisc], Type.i32))
+    else:
+        result = ov_opset.concat([res_x, res_y, res_z], axisc)
+
+    return OpenVINOKerasTensor(result.output(0))
 
 
 def cumprod(x, axis=None, dtype=None):
@@ -2129,6 +2200,12 @@ def nanmin(x, axis=None, keepdims=False):
     raise NotImplementedError("`nanmin` is not supported with openvino backend")
 
 
+def nanprod(x, axis=None, keepdims=False):
+    raise NotImplementedError(
+        "`nanprod` is not supported with openvino backend"
+    )
+
+
 def nansum(x, axis=None, keepdims=False):
     x = get_ov_output(x)
     x_type = x.get_element_type()
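Note: the OpenVINO `cross` implementation pads 2-vectors with a zero third component, applies the standard 3D formula, and for two 2-vector inputs keeps only the scalar z component, which is what NumPy returns in that case. A plain-NumPy sanity sketch of that identity (illustrative only):

    import numpy as np

    a2, b2 = np.array([1.0, 2.0]), np.array([3.0, 4.0])
    # Pad to 3D with a zero z component; the z output is a_x*b_y - a_y*b_x.
    a3, b3 = np.append(a2, 0.0), np.append(b2, 0.0)
    z = a3[0] * b3[1] - a3[1] * b3[0]
    print(z, np.cross(a2, b2))  # -> -2.0 -2.0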
keras/src/backend/tensorflow/numpy.py CHANGED
@@ -2184,6 +2184,16 @@ def nanmin(x, axis=None, keepdims=False):
     )
 
 
+def nanprod(x, axis=None, keepdims=False):
+    x = convert_to_tensor(x)
+
+    if not x.dtype.is_floating:
+        return prod(x, axis=axis, keepdims=keepdims)
+
+    x_safe = tf.where(tf.math.is_nan(x), tf.ones((), dtype=x.dtype), x)
+    return prod(x_safe, axis=axis, keepdims=keepdims)
+
+
 def nansum(x, axis=None, keepdims=False):
     x = convert_to_tensor(x)
     dtype = standardize_dtype(x.dtype)
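Note: TensorFlow has no native nanprod, so the backend masks NaNs to 1 (the multiplicative identity) and reuses `prod`; integer tensors skip the mask because they cannot hold NaN. The same trick in plain TensorFlow, as an illustrative sketch:

    import tensorflow as tf

    x = tf.constant([[1.0, float("nan"), 3.0]])
    # Replace NaN with 1, then reduce as an ordinary product.
    x_safe = tf.where(tf.math.is_nan(x), tf.ones((), dtype=x.dtype), x)
    print(tf.reduce_prod(x_safe, axis=1))  # -> [3.]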
keras/src/backend/torch/numpy.py CHANGED
@@ -1318,6 +1318,25 @@ def nanmin(x, axis=None, keepdims=False):
     )
 
 
+def nanprod(x, axis=None, keepdims=False):
+    x = convert_to_tensor(x)
+
+    if axis == () or axis == []:
+        return torch.nan_to_num(x, nan=1)
+
+    if isinstance(axis, (list, tuple)):
+        axis = sorted(axis, reverse=True)
+
+    if not torch.is_floating_point(x):
+        return prod(x, axis=axis, keepdims=keepdims)
+
+    return prod(
+        torch.where(torch.isnan(x), torch.ones((), dtype=x.dtype), x),
+        axis=axis,
+        keepdims=keepdims,
+    )
+
+
 def nansum(x, axis=None, keepdims=False):
     if isinstance(x, (list, tuple)):
         x = stack(x)
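Note: the torch backend special-cases an empty `axis` (a reduction over no dimensions), where the only effect is the elementwise NaN-to-1 substitution; otherwise NaNs are masked before calling `prod`. An illustrative sketch in plain PyTorch:

    import torch

    x = torch.tensor([1.0, float("nan"), 3.0])
    # axis=() means "reduce over nothing": only the NaN substitution applies.
    print(torch.nan_to_num(x, nan=1))  # -> tensor([1., 1., 3.])
    # A real reduction masks NaN to 1 first, then multiplies.
    masked = torch.where(torch.isnan(x), torch.ones((), dtype=x.dtype), x)
    print(torch.prod(masked))  # -> tensor(3.)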
keras/src/export/onnx.py CHANGED
@@ -76,13 +76,19 @@ def export_onnx(
     if input_signature is None:
         input_signature = get_input_signature(model)
     if not input_signature or not model._called:
-        raise ValueError(
+        raise ValueError("The model provided has never called. ")
+
+    # Extract specs for proper input name generation
+    if len(input_signature) == 1 and isinstance(input_signature[0], list):
+        # Multi-input case: input_signature = [[spec1, spec2, ...]]
+        specs_for_names = input_signature[0]
+    else:
+        # Single input case: input_signature = [spec]
+        specs_for_names = input_signature
+
     input_names = [
         getattr(spec, "name", None) or f"input_{i}"
-        for i, spec in enumerate(
+        for i, spec in enumerate(specs_for_names)
     ]
 
     if backend.backend() in ("tensorflow", "jax"):
@@ -105,10 +111,37 @@ def export_onnx(
     elif backend.backend() == "torch":
         import torch
 
+        """Generate dynamic_axes format for ONNX export."""
+        dynamic_axes = {}
+
+        for input_idx, spec in enumerate(specs_for_names):
+            if not hasattr(spec, "shape"):
+                continue
+
+            shape = spec.shape
+            dynamic_dims = {}
+
+            for dim_idx, dim_size in enumerate(shape):
+                if dim_size is None:
+                    if dim_idx == 0:
+                        dim_name = "batch"
+                    else:
+                        dim_name = f"dim_{input_idx}_{dim_idx}"
+                    dynamic_dims[dim_idx] = dim_name
+
+            if dynamic_dims:
+                input_name = (
+                    input_names[input_idx]
+                    if input_idx < len(input_names)
+                    else f"input_{input_idx}"
+                )
+                dynamic_axes[input_name] = dynamic_dims
+
         sample_inputs = tree.map_structure(
             lambda x: convert_spec_to_tensor(x, replace_none_number=1),
             input_signature,
         )
+
         sample_inputs = tuple(sample_inputs)
         # TODO: Make dict model exportable.
         if any(isinstance(x, dict) for x in sample_inputs):
@@ -140,34 +173,80 @@ def export_onnx(
             warnings.filterwarnings(
                 "ignore", message=r".*suppressed about get_attr references.*"
             )
+            # Suppress TorchScript tracing warnings
+            warnings.filterwarnings(
+                "ignore",
+                message=r".*Converting a tensor to a Python boolean.*",
+                category=torch.jit.TracerWarning,
+            )
+            warnings.filterwarnings(
+                "ignore",
+                message=r".*Converting a tensor to a Python integer.*",
+                category=torch.jit.TracerWarning,
+            )
+            warnings.filterwarnings(
+                "ignore",
+                message=r".*Iterating over a tensor.*",
+                category=torch.jit.TracerWarning,
+            )
+            warnings.filterwarnings(
+                "ignore",
+                message=r".*Using len to get tensor shape.*",
+                category=torch.jit.TracerWarning,
+            )
+            warnings.filterwarnings(
+                "ignore",
+                message=r".*torch.tensor results are registered as constants.*",
+                category=torch.jit.TracerWarning,
+            )
+
+            # When dynamic shapes are present, prefer TorchScript over
+            # TorchDynamo because TorchDynamo has constraint inference issues
+            # with dynamic dimensions
+            if not dynamic_axes:
                 try:
-                    # Try the TorchDynamo-based ONNX exporter first
+                    # Try the TorchDynamo-based ONNX exporter first for static
+                    # shapes
+                    export_kwargs = {
+                        "verbose": actual_verbose,
+                        "opset_version": opset_version,
+                        "input_names": input_names,
+                        "dynamo": True,
+                    }
+
                     onnx_program = torch.onnx.export(
-                        model,
-                        sample_inputs,
-                        verbose=actual_verbose,
-                        opset_version=opset_version,
-                        input_names=input_names,
-                        dynamo=True,
+                        model, sample_inputs, **export_kwargs
                     )
                     if hasattr(onnx_program, "optimize"):
                         onnx_program.optimize()  # Only supported by torch>=2.6.0.
                     onnx_program.save(filepath)
+
+                    return
+                except Exception:
+                    pass
+
+            """Export using TorchScript-based ONNX exporter."""
+            # Set verbose to False for TorchScript due to file system leakage
+            torchscript_verbose = verbose
+            if verbose is None:
+                # Set to `False` due to file system leakage issue:
+                # https://github.com/keras-team/keras/issues/20826
+                torchscript_verbose = False
+
+            export_kwargs = {
+                "verbose": torchscript_verbose,
+                "opset_version": opset_version,
+                "input_names": input_names,
+                "export_params": True,
+                "do_constant_folding": True,
+                "dynamo": False,
+            }
+
+            # For TorchScript (dynamo=False), use dynamic_axes parameter
+            if dynamic_axes:
+                export_kwargs["dynamic_axes"] = dynamic_axes
+
+            torch.onnx.export(model, sample_inputs, filepath, **export_kwargs)
     else:
         raise NotImplementedError(
             "`export_onnx` is only compatible with TensorFlow, JAX and "
keras/src/ops/numpy.py CHANGED
@@ -5231,6 +5231,72 @@ def nanmin(x, axis=None, keepdims=False):
     return backend.numpy.nanmin(x, axis=axis, keepdims=keepdims)
 
 
+class Nanprod(Operation):
+    def __init__(self, axis=None, keepdims=False, *, name=None):
+        super().__init__(name=name)
+        self.axis = axis
+        self.keepdims = keepdims
+
+    def call(self, x):
+        return backend.numpy.nanprod(
+            x,
+            axis=self.axis,
+            keepdims=self.keepdims,
+        )
+
+    def compute_output_spec(self, x):
+        dtype = backend.standardize_dtype(x.dtype)
+
+        if dtype == "bool":
+            dtype = "int32"
+        elif dtype in ("int8", "int16"):
+            dtype = "int32"
+        elif dtype in ("uint8", "uint16"):
+            dtype = "uint32"
+
+        if backend.backend() == "torch" and dtype == "uint32":
+            dtype = "int32"
+
+        return KerasTensor(
+            reduce_shape(x.shape, axis=self.axis, keepdims=self.keepdims),
+            dtype=dtype,
+        )
+
+
+@keras_export(["keras.ops.nanprod", "keras.ops.numpy.nanprod"])
+def nanprod(x, axis=None, keepdims=False):
+    """Product of a tensor over the given axes, ignoring NaNs.
+
+    Args:
+        x: Input tensor.
+        axis: Axis or axes along which the product is computed. The default is
+            to compute the product of the flattened tensor.
+        keepdims: If this is set to `True`, the axes which are reduced are left
+            in the result as dimensions with size one.
+
+    Returns:
+        Output tensor containing the product, with NaN values ignored.
+
+    Examples:
+    >>> import numpy as np
+    >>> from keras import ops
+    >>> x = np.array([[1.0, np.nan, 3.0],
+    ...               [np.nan, 2.0, 1.0]])
+    >>> ops.nanprod(x)
+    6.0
+
+    >>> ops.nanprod(x, axis=1)
+    array([3., 2.])
+
+    >>> ops.nanprod(x, axis=1, keepdims=True)
+    array([[3.],
+           [2.]])
+    """
+    if any_symbolic_tensors((x,)):
+        return Nanprod(axis=axis, keepdims=keepdims).symbolic_call(x)
+    return backend.numpy.nanprod(x, axis=axis, keepdims=keepdims)
+
+
 class Nansum(Operation):
     def __init__(self, axis=None, keepdims=False, *, name=None):
         super().__init__(name=name)
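Note: `Nanprod` gives `nanprod` a symbolic path: on `KerasTensor` inputs only the output spec (shape and dtype) is computed, while eager inputs dispatch straight to the active backend. A minimal illustrative sketch:

    import numpy as np
    import keras
    from keras import ops

    # Symbolic call: shape/dtype inference only, no backend computation.
    x_sym = keras.KerasTensor((None, 8), dtype="float32")
    print(ops.nanprod(x_sym, axis=1).shape)  # -> (None,)

    # Eager call: dispatches to the active backend's nanprod.
    x = np.array([[1.0, np.nan, 3.0]])
    print(ops.nanprod(x, axis=1))  # -> [3.]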
keras/src/testing/__init__.py CHANGED
@@ -2,6 +2,7 @@ from keras.src.testing.test_case import TestCase
 from keras.src.testing.test_case import jax_uses_gpu
 from keras.src.testing.test_case import jax_uses_tpu
 from keras.src.testing.test_case import tensorflow_uses_gpu
+from keras.src.testing.test_case import tensorflow_uses_tpu
 from keras.src.testing.test_case import torch_uses_gpu
 from keras.src.testing.test_case import uses_gpu
 from keras.src.testing.test_case import uses_tpu
keras/src/testing/test_case.py CHANGED
@@ -8,7 +8,6 @@ import numpy as np
 from absl.testing import parameterized
 
 from keras.src import backend
-from keras.src import distribution
 from keras.src import ops
 from keras.src import tree
 from keras.src import utils
@@ -626,43 +625,68 @@ class TestCase(parameterized.TestCase, unittest.TestCase):
         self.assertEqual(dtype, "float32")
 
 
-def jax_uses_gpu():
-    return backend.backend() == "jax" and uses_gpu()
+def _jax_uses(device_type):
+    import jax
+
+    return jax.default_backend() == device_type
+
+
+def _tensorflow_uses(device_type):
+    import tensorflow as tf
+
+    return len(tf.config.list_physical_devices(device_type.upper())) > 0
+
+
+def _torch_uses(device_type):
+    if device_type == "gpu":
+        from keras.src.backend.torch.core import get_device
+
+        return get_device() == "cuda"
+    return device_type == "cpu"
 
 
 def uses_gpu():
+    if not hasattr(uses_gpu, "_value"):
+        if backend.backend() == "tensorflow":
+            uses_gpu._value = _tensorflow_uses("gpu")
+        elif backend.backend() == "jax":
+            uses_gpu._value = _jax_uses("gpu")
+        elif backend.backend() == "torch":
+            uses_gpu._value = _torch_uses("gpu")
+        else:
+            uses_gpu._value = False
+    return uses_gpu._value
+
+
+def uses_tpu():
+    if not hasattr(uses_tpu, "_value"):
+        if backend.backend() == "tensorflow":
+            uses_tpu._value = _tensorflow_uses("tpu")
+        elif backend.backend() == "jax":
+            uses_tpu._value = _jax_uses("tpu")
+        else:
+            uses_tpu._value = False
+    return uses_tpu._value
+
+
+def jax_uses_gpu():
+    return backend.backend() == "jax" and uses_gpu()
 
 
 def jax_uses_tpu():
     return backend.backend() == "jax" and uses_tpu()
 
 
+def tensorflow_uses_gpu():
+    return backend.backend() == "tensorflow" and uses_gpu()
+
+
+def tensorflow_uses_tpu():
+    return backend.backend() == "tensorflow" and uses_tpu()
+
+
+def torch_uses_gpu():
+    return backend.backend() == "torch" and uses_gpu()
 
 
 def create_keras_tensors(input_shape, dtype, sparse, ragged):
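Note: the device helpers now cache their answer on the function object and gain TPU variants (with `tensorflow_uses_tpu` re-exported from `keras.src.testing`, per the diff above). A hedged example of how such helpers are typically used as skip conditions; the pytest usage is illustrative, not taken from the diff:

    import pytest
    from keras.src import testing

    @pytest.mark.skipif(
        not testing.tensorflow_uses_tpu(),
        reason="Requires the TensorFlow backend running on a TPU.",
    )
    def test_tpu_only_behavior():
        ...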
keras/src/version.py CHANGED
{keras_nightly-3.14.0.dev2026020104.dist-info → keras_nightly-3.14.0.dev2026020404.dist-info}/RECORD RENAMED
@@ -45,11 +45,11 @@ keras/_tf_keras/keras/losses/__init__.py,sha256=xBc_KOtSLwp3h3CKQ0EnCuIy-Bsak2SP
 keras/_tf_keras/keras/metrics/__init__.py,sha256=_wF31PTvua5ahF9JEW4Hx1UVNjVCLqVI8J5JNrZCBf8,6546
 keras/_tf_keras/keras/mixed_precision/__init__.py,sha256=AM51CzHqzcY75tqdpQiuVcTRUEpUzBqeb-EfLeSDSV8,727
 keras/_tf_keras/keras/models/__init__.py,sha256=83pyA0pzytqin8JLV6FEbPreCb-V64ToebxFGrHsVdQ,501
-keras/_tf_keras/keras/ops/__init__.py,sha256=
+keras/_tf_keras/keras/ops/__init__.py,sha256=VUR7wlaKfOzMloUgtfMVLNyNQvD1GxLNrJ0YIRC4754,15818
 keras/_tf_keras/keras/ops/image/__init__.py,sha256=oM_PLh5Jk9OGfi1bbJcfWkjoq0Ye5JQG9a7v_KzDfoc,1034
 keras/_tf_keras/keras/ops/linalg/__init__.py,sha256=0ab6icK3yuIm4khSfAksGRFLEAJhaOu6gGgarau4iEQ,822
 keras/_tf_keras/keras/ops/nn/__init__.py,sha256=2eD8IlkfBrsmJjHpzsxMM3_058oGeZVgohdBd27iDnI,2992
-keras/_tf_keras/keras/ops/numpy/__init__.py,sha256=
+keras/_tf_keras/keras/ops/numpy/__init__.py,sha256=V7sHwuFJ9dxD06bcckB6VTjstR-LurGes9JXyA9BhiY,9880
 keras/_tf_keras/keras/optimizers/__init__.py,sha256=1fx0vEB-oGu-9dumxoIvX4qVHdgJvf74OLyYoBkE2y0,1267
 keras/_tf_keras/keras/optimizers/legacy/__init__.py,sha256=uIMQESCV80Q0FY-9ikQUjXYPyZqmTfAM3dfohQ5DzYs,516
 keras/_tf_keras/keras/optimizers/schedules/__init__.py,sha256=pQF3rQiAPuUSTUdflTr-fpL77oyGIv9xzGdjae3M3kw,1120
@@ -111,11 +111,11 @@ keras/losses/__init__.py,sha256=VIXBHQFNdLUPZ7JuwtIKj_4E-xf2yvNyrmdklvjr_xM,3667
 keras/metrics/__init__.py,sha256=qeEwtqpSCAaCr8BMUv1eVaqJl2Zb83OB5K0BG3JB0nI,6245
 keras/mixed_precision/__init__.py,sha256=AM51CzHqzcY75tqdpQiuVcTRUEpUzBqeb-EfLeSDSV8,727
 keras/models/__init__.py,sha256=83pyA0pzytqin8JLV6FEbPreCb-V64ToebxFGrHsVdQ,501
-keras/ops/__init__.py,sha256=
+keras/ops/__init__.py,sha256=VUR7wlaKfOzMloUgtfMVLNyNQvD1GxLNrJ0YIRC4754,15818
 keras/ops/image/__init__.py,sha256=oM_PLh5Jk9OGfi1bbJcfWkjoq0Ye5JQG9a7v_KzDfoc,1034
 keras/ops/linalg/__init__.py,sha256=0ab6icK3yuIm4khSfAksGRFLEAJhaOu6gGgarau4iEQ,822
 keras/ops/nn/__init__.py,sha256=2eD8IlkfBrsmJjHpzsxMM3_058oGeZVgohdBd27iDnI,2992
-keras/ops/numpy/__init__.py,sha256=
+keras/ops/numpy/__init__.py,sha256=V7sHwuFJ9dxD06bcckB6VTjstR-LurGes9JXyA9BhiY,9880
 keras/optimizers/__init__.py,sha256=1fx0vEB-oGu-9dumxoIvX4qVHdgJvf74OLyYoBkE2y0,1267
 keras/optimizers/legacy/__init__.py,sha256=uIMQESCV80Q0FY-9ikQUjXYPyZqmTfAM3dfohQ5DzYs,516
 keras/optimizers/schedules/__init__.py,sha256=pQF3rQiAPuUSTUdflTr-fpL77oyGIv9xzGdjae3M3kw,1120
@@ -128,7 +128,7 @@ keras/regularizers/__init__.py,sha256=542Shphw7W8h4Dyf2rmqMKUECVZ8IVBvN9g1LWhz-b
 keras/saving/__init__.py,sha256=KvL2GZxjvgFgEhvEnkvqjIR9JSNHKz-NWZacXajsjLI,1298
 keras/src/__init__.py,sha256=Gi4S7EiCMkE03PbdGNpFdaUYySWDs_FcAJ8Taz9Y1BE,684
 keras/src/api_export.py,sha256=gXOkBOnmscV013WAc75lc4Up01-Kkg9EylIAT_QWctg,1173
-keras/src/version.py,sha256=
+keras/src/version.py,sha256=FhnIBR0HLjhX-AjhyRTy_cn4TF4qatX-aS3GrHpvUok,204
 keras/src/activations/__init__.py,sha256=0nL3IFDB9unlrMz8ninKOWo-uCHasTUpTo1tXZb2u44,4433
 keras/src/activations/activations.py,sha256=mogPggtp4CGldI3VOPNmesRxp6EbiR1_i4KLGaVwzL8,17614
 keras/src/applications/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -171,7 +171,7 @@ keras/src/backend/jax/layer.py,sha256=o6CicT06udwamTRQIjNSDLZLyYHFzBXNbxewXgWe0i
 keras/src/backend/jax/linalg.py,sha256=LDaLZYz49ChE2kJR3YpaM9xuwusvd3krV7nNAAazTWA,2642
 keras/src/backend/jax/math.py,sha256=1IEDpdoF8e5ltu3D4wbDQuihzvJHhMXz8W9Z_E-eJqU,9391
 keras/src/backend/jax/nn.py,sha256=mrRawNvf9EWe8rdTwK_Auz6xdLkVG6hH0nIAP7hyUDE,60271
-keras/src/backend/jax/numpy.py,sha256=
+keras/src/backend/jax/numpy.py,sha256=DBDcy3cSkNUsI4yQ_8bnP9U7bEzhyUhaMMd6r2tkCdY,39617
 keras/src/backend/jax/optimizer.py,sha256=5DeXQHcYmUI6F9i1m1VHn3sBt4LEStOeBXnKdESevLM,4134
 keras/src/backend/jax/random.py,sha256=Uk2huGIk_dlzMrx5eDVrrr2TeCEMitn2vr4yzA0NXjs,3594
 keras/src/backend/jax/rnn.py,sha256=Ycq0qfLY4M4jhltvztpLQyywjEM17T7CZQFh4hhHOUE,7767
@@ -186,7 +186,7 @@ keras/src/backend/numpy/layer.py,sha256=dTk7W7ql7vRgll7JbOXK5PlIhQw5VHdpSjKciHd8
 keras/src/backend/numpy/linalg.py,sha256=uzLTxEyuX_gDcnoA5Q59GdTg33py0WooKK5T6T9Td6c,2543
 keras/src/backend/numpy/math.py,sha256=HdkEA5ro7dtQBTP78GFIgqTFLgNQ49PXHhqI1vLRGfo,10169
 keras/src/backend/numpy/nn.py,sha256=P9JAnTlwSTI7bVv8WIv1pDQJHpjML_WJ0RsJWy-LJMc,46200
-keras/src/backend/numpy/numpy.py,sha256=
+keras/src/backend/numpy/numpy.py,sha256=FvJmqSUBhIsAujUpkGxz6SADHD3LEG5hsMKrNEj9u9g,38805
 keras/src/backend/numpy/random.py,sha256=wx2nE75q7L2cBMjtQlQx8yKMj4Ie3puFMDQsbrZO8SA,3961
 keras/src/backend/numpy/rnn.py,sha256=thOsMung1qR3lQsR4_D6hqKMFollQgrB0KwsJLk4BMY,7867
 keras/src/backend/numpy/trainer.py,sha256=MzWr8_LLHa1P6fxdUWirGw_lQwHGF_vkZ7RUGLUzjUs,11126
@@ -198,7 +198,7 @@ keras/src/backend/openvino/layer.py,sha256=5RdvaH1yOyPAphjKiuQAK1H_yZFYKE1Hp7c5b
 keras/src/backend/openvino/linalg.py,sha256=L6a4MFGND2wWzPVCh44cwuOgkcC4wJTo8Xy3HwW04lg,1614
 keras/src/backend/openvino/math.py,sha256=qw9kX2sJ2qr0dBJF12Ey0E2GcwixPUqoev6UcNra4NI,3944
 keras/src/backend/openvino/nn.py,sha256=zULPxdwVO7JDZUUtsuoEEPCLQ09ew8z8T6G_i_NEqrM,23741
-keras/src/backend/openvino/numpy.py,sha256=
+keras/src/backend/openvino/numpy.py,sha256=vPmpxizrgQyShUTeAaVSI38ANX6z2uHYiAf9b-4n9e4,115691
 keras/src/backend/openvino/random.py,sha256=4hRUtIP6qJxO3Qy9uH1x6jSuJna3nWPdUf4x2QU8-ew,5575
 keras/src/backend/openvino/rnn.py,sha256=ErmuZLPSgG9qU-NfYPPvBZ6Ysy8k-fA4g19Vhqq7OVQ,866
 keras/src/backend/openvino/trainer.py,sha256=bMmtSALqydqdS6ke-5sYW5fgxZDshDH810p_C0xCRTg,9087
@@ -211,7 +211,7 @@ keras/src/backend/tensorflow/layer.py,sha256=69d40LwL4HhKRsCjj1VRpjfrQXXF8VV3vh0
 keras/src/backend/tensorflow/linalg.py,sha256=_lZVfdY1tFvrN7xwbt3INGoTR0yC5v-kI1Q0XppVibY,8773
 keras/src/backend/tensorflow/math.py,sha256=zTu_7Ff6B2Ro862z_xH0OCmIWbV74DjsO5UnfjYuOUQ,12370
 keras/src/backend/tensorflow/nn.py,sha256=6vtZHzUED6_blUPE1Tnc3GAxPpJ2ebxoaiMn80tTL9k,51328
-keras/src/backend/tensorflow/numpy.py,sha256=
+keras/src/backend/tensorflow/numpy.py,sha256=czmwm3J6I3UouRWpujDRDWo6Q80ZnHEJK8-jH6--uxw,107098
 keras/src/backend/tensorflow/optimizer.py,sha256=kFlyEOnGjEYdLpd8mpwhUeku78__xBfZbbrDWpJrq60,9307
 keras/src/backend/tensorflow/random.py,sha256=iO8V_soaDXZm9ewyAVbjudhsMj08C348c9Bz64nxXC4,6475
 keras/src/backend/tensorflow/rnn.py,sha256=JbOSpt48cm612c7YwiTYOQCQsNXyI_6QeRhtUn8qEvM,34829
@@ -227,7 +227,7 @@ keras/src/backend/torch/layer.py,sha256=htECdpv9ioHWM8_zqQkEdxgDsgLu8XJi5yXgnLl-
 keras/src/backend/torch/linalg.py,sha256=wgPCfnscp5HOBmX9_-m-57lzxs1ttLNzmHqj2VYYq7k,2108
 keras/src/backend/torch/math.py,sha256=g-ElDii2Y_o1-t6BAu2nbS7JH-aPqVS5Fqds8aYzIlg,14324
 keras/src/backend/torch/nn.py,sha256=80MdDzkN7wV3MJbNsGh9B8IkdBoXC36wQcV8_o13y-8,37688
-keras/src/backend/torch/numpy.py,sha256=
+keras/src/backend/torch/numpy.py,sha256=raM9hy7dS3EsHghiKVXWx4X3Cqwtr0h4d8QjqYubR6o,59821
 keras/src/backend/torch/random.py,sha256=YhLfC7qkGpzlU_i6gGPVormo3BMSo7OUA3TC3GCehrA,8292
 keras/src/backend/torch/rnn.py,sha256=MJIVbHKsUA2dZm4Gu2NvRxlrFCWeWSxSZRmFxSsC3Zg,26041
 keras/src/backend/torch/trainer.py,sha256=dcikz1c5O0FHNzRKSi6WhIHsHfLV2HDlrXPElSd1cgE,17985
@@ -284,7 +284,7 @@ keras/src/dtype_policies/dtype_policy_map.py,sha256=DqDYlssUGSiTqawPpaVRvR6ljYD8
 keras/src/export/__init__.py,sha256=wQfjXEPN1YO2n0gz-7Eme0y_vq86s3SEWkZgs534sns,366
 keras/src/export/export_utils.py,sha256=DpfA5yI37gaMjyESxGTlf7aQ8FhYp0u8LQKxyKiFaoU,5585
 keras/src/export/litert.py,sha256=zI9q1n1Qi1w_7vyBVMBrPNGRGcafmeOmwJKtVCAVYHc,8995
-keras/src/export/onnx.py,sha256=
+keras/src/export/onnx.py,sha256=OATNkvx6DqJLavEBwN_EY2JUZ8JmquwtXf9Sd4uuO6Y,11206
 keras/src/export/openvino.py,sha256=C9QNCOQ8-MwoOr8ZUqQvGzWY_CHOb8yDlMIJ9NYsLyw,7626
 keras/src/export/saved_model.py,sha256=bxcsVd87MXnw3ENKu_dbUc8JzPFqjOAPbLL0U5KqG-g,28425
 keras/src/export/tf2onnx_lib.py,sha256=cvHXS84Ocjcp1cTh5SziXAzNUsZ51RqjXNhOk5IlNDs,7234
@@ -504,7 +504,7 @@ keras/src/ops/linalg.py,sha256=3V8S_cgNxZZCIFcFj-FBHTdRqWNbimDtumMvfoc0f30,26736
 keras/src/ops/math.py,sha256=4qYMJ5qAPmeSyeF63YWoGbUkQt6f4_VX0enOChU4mXU,37233
 keras/src/ops/nn.py,sha256=04gjHB2BWusy4tWm59EO5Ns1paJC5umDNGwNCKzaJWQ,104658
 keras/src/ops/node.py,sha256=aJgn9D-GkteE--Bbt2cZ9JjVxb2W2uS1OWEKoeLsl3Y,5583
-keras/src/ops/numpy.py,sha256=
+keras/src/ops/numpy.py,sha256=y7rm9-2QT9m9xKWFYivJx7U8EoXDfNMEGK9hyZANBvI,266299
 keras/src/ops/operation.py,sha256=A7sh9Hi6kZb7wkeMmhrDQIq770ofANXuP-Qg-kwCM3o,15485
 keras/src/ops/operation_utils.py,sha256=C6eThl-haKzlDH0fC1rn5-P1P-pCfIfXs-fy-ADR534,14523
 keras/src/ops/symbolic_arguments.py,sha256=MKwXxZYkyouD9BPmQ1uUNxILdcwPvTayAqXaUV3P3o4,1628
@@ -550,8 +550,8 @@ keras/src/saving/orbax_util.py,sha256=ArJI9hQODUyyvzCiXt8AS3VH6E4SL0vF02-RHBk30g
 keras/src/saving/saving_api.py,sha256=X-zsTum57M3AWf_cYqhq_o5wgLcFxNh1ditoxbL_0LY,14694
 keras/src/saving/saving_lib.py,sha256=bRI8TeNOlflTfX3njSkkwNv-VYip-OW7ienIm0lL96I,58920
 keras/src/saving/serialization_lib.py,sha256=yzCTm8hin__MGA2N5M5F-8Zbts5ZJVmINbrH4wEtIwI,30334
-keras/src/testing/__init__.py,sha256=
-keras/src/testing/test_case.py,sha256=
+keras/src/testing/__init__.py,sha256=29k8fvygLsF33eSTUowVIEEQLRB3AzTQRvYotEOe3U8,428
+keras/src/testing/test_case.py,sha256=FrQiXRvtarSv7G9g1PK8GJYJeMsKVg0GMzsLFivMopI,32432
 keras/src/testing/test_utils.py,sha256=6Vb8tJIyjU1ay63w3jvXNNhh7sSNrosQll4ii1NXELQ,6197
 keras/src/trainers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras/src/trainers/compile_utils.py,sha256=k5FDn7we0RN9fhRslY_WOQZRFwfzjqpYmiDeOKkAKqk,31260
@@ -618,7 +618,7 @@ keras/utils/bounding_boxes/__init__.py,sha256=jtvQll4u8ZY0Z96HwNhP1nxWEG9FM3gI-6
 keras/utils/legacy/__init__.py,sha256=oSYZz6uS8UxSElRaaJYWJEoweJ4GAasZjnn7fNaOlog,342
 keras/visualization/__init__.py,sha256=UKWmiy6sps4SWlmQi9WX8_Z53cPpLlphz2zIeHdwJpQ,722
 keras/wrappers/__init__.py,sha256=QkS-O5K8qGS7C3sytF8MpmO6PasATpNVGF8qtb7Ojsw,407
-keras_nightly-3.14.0.
-keras_nightly-3.14.0.
-keras_nightly-3.14.0.
-keras_nightly-3.14.0.
+keras_nightly-3.14.0.dev2026020404.dist-info/METADATA,sha256=0_dkofG5lLCYTwNn1VT_Inp2boYDQMIjPvIW5QwXM9I,6339
+keras_nightly-3.14.0.dev2026020404.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+keras_nightly-3.14.0.dev2026020404.dist-info/top_level.txt,sha256=ptcw_-QuGZ4ZDjMdwi_Z0clZm8QAqFdvzzFnDEOTs9o,6
+keras_nightly-3.14.0.dev2026020404.dist-info/RECORD,,
{keras_nightly-3.14.0.dev2026020104.dist-info → keras_nightly-3.14.0.dev2026020404.dist-info}/WHEEL RENAMED
File without changes