keras-nightly 3.12.0.dev2025083103__py3-none-any.whl → 3.14.0.dev2026011604__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- keras/__init__.py +1 -0
- keras/_tf_keras/keras/__init__.py +1 -0
- keras/_tf_keras/keras/callbacks/__init__.py +3 -0
- keras/_tf_keras/keras/distillation/__init__.py +16 -0
- keras/_tf_keras/keras/distribution/__init__.py +3 -0
- keras/_tf_keras/keras/dtype_policies/__init__.py +6 -0
- keras/_tf_keras/keras/layers/__init__.py +21 -0
- keras/_tf_keras/keras/ops/__init__.py +16 -0
- keras/_tf_keras/keras/ops/image/__init__.py +1 -0
- keras/_tf_keras/keras/ops/linalg/__init__.py +1 -0
- keras/_tf_keras/keras/ops/nn/__init__.py +3 -0
- keras/_tf_keras/keras/ops/numpy/__init__.py +12 -0
- keras/_tf_keras/keras/quantizers/__init__.py +13 -0
- keras/callbacks/__init__.py +3 -0
- keras/distillation/__init__.py +16 -0
- keras/distribution/__init__.py +3 -0
- keras/dtype_policies/__init__.py +6 -0
- keras/layers/__init__.py +21 -0
- keras/ops/__init__.py +16 -0
- keras/ops/image/__init__.py +1 -0
- keras/ops/linalg/__init__.py +1 -0
- keras/ops/nn/__init__.py +3 -0
- keras/ops/numpy/__init__.py +12 -0
- keras/quantizers/__init__.py +13 -0
- keras/src/applications/imagenet_utils.py +4 -1
- keras/src/backend/common/backend_utils.py +30 -6
- keras/src/backend/common/dtypes.py +6 -12
- keras/src/backend/common/name_scope.py +2 -1
- keras/src/backend/common/variables.py +38 -20
- keras/src/backend/jax/core.py +126 -78
- keras/src/backend/jax/distribution_lib.py +16 -2
- keras/src/backend/jax/layer.py +3 -1
- keras/src/backend/jax/linalg.py +4 -0
- keras/src/backend/jax/nn.py +511 -29
- keras/src/backend/jax/numpy.py +109 -23
- keras/src/backend/jax/optimizer.py +3 -2
- keras/src/backend/jax/trainer.py +18 -3
- keras/src/backend/numpy/linalg.py +4 -0
- keras/src/backend/numpy/nn.py +313 -2
- keras/src/backend/numpy/numpy.py +97 -8
- keras/src/backend/openvino/__init__.py +1 -0
- keras/src/backend/openvino/core.py +6 -23
- keras/src/backend/openvino/linalg.py +4 -0
- keras/src/backend/openvino/nn.py +271 -20
- keras/src/backend/openvino/numpy.py +1369 -195
- keras/src/backend/openvino/random.py +7 -14
- keras/src/backend/tensorflow/layer.py +43 -9
- keras/src/backend/tensorflow/linalg.py +24 -0
- keras/src/backend/tensorflow/nn.py +545 -1
- keras/src/backend/tensorflow/numpy.py +351 -56
- keras/src/backend/tensorflow/trainer.py +6 -2
- keras/src/backend/torch/core.py +3 -1
- keras/src/backend/torch/linalg.py +4 -0
- keras/src/backend/torch/nn.py +125 -0
- keras/src/backend/torch/numpy.py +109 -9
- keras/src/backend/torch/trainer.py +8 -2
- keras/src/callbacks/__init__.py +1 -0
- keras/src/callbacks/callback_list.py +45 -11
- keras/src/callbacks/model_checkpoint.py +5 -0
- keras/src/callbacks/orbax_checkpoint.py +332 -0
- keras/src/callbacks/terminate_on_nan.py +54 -5
- keras/src/datasets/cifar10.py +5 -0
- keras/src/distillation/__init__.py +1 -0
- keras/src/distillation/distillation_loss.py +390 -0
- keras/src/distillation/distiller.py +598 -0
- keras/src/distribution/distribution_lib.py +14 -0
- keras/src/dtype_policies/__init__.py +4 -0
- keras/src/dtype_policies/dtype_policy.py +180 -1
- keras/src/export/__init__.py +2 -0
- keras/src/export/export_utils.py +39 -2
- keras/src/export/litert.py +248 -0
- keras/src/export/onnx.py +6 -0
- keras/src/export/openvino.py +1 -1
- keras/src/export/tf2onnx_lib.py +3 -0
- keras/src/layers/__init__.py +13 -0
- keras/src/layers/activations/softmax.py +9 -4
- keras/src/layers/attention/attention.py +1 -1
- keras/src/layers/attention/multi_head_attention.py +4 -1
- keras/src/layers/core/dense.py +406 -102
- keras/src/layers/core/einsum_dense.py +521 -116
- keras/src/layers/core/embedding.py +257 -99
- keras/src/layers/core/input_layer.py +1 -0
- keras/src/layers/core/reversible_embedding.py +399 -0
- keras/src/layers/input_spec.py +17 -17
- keras/src/layers/layer.py +50 -15
- keras/src/layers/merging/concatenate.py +6 -5
- keras/src/layers/merging/dot.py +4 -1
- keras/src/layers/pooling/adaptive_average_pooling1d.py +65 -0
- keras/src/layers/pooling/adaptive_average_pooling2d.py +62 -0
- keras/src/layers/pooling/adaptive_average_pooling3d.py +63 -0
- keras/src/layers/pooling/adaptive_max_pooling1d.py +65 -0
- keras/src/layers/pooling/adaptive_max_pooling2d.py +62 -0
- keras/src/layers/pooling/adaptive_max_pooling3d.py +63 -0
- keras/src/layers/pooling/base_adaptive_pooling.py +63 -0
- keras/src/layers/preprocessing/discretization.py +6 -5
- keras/src/layers/preprocessing/feature_space.py +8 -4
- keras/src/layers/preprocessing/image_preprocessing/aug_mix.py +2 -2
- keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/validation.py +5 -5
- keras/src/layers/preprocessing/image_preprocessing/random_contrast.py +3 -3
- keras/src/layers/preprocessing/image_preprocessing/resizing.py +10 -0
- keras/src/layers/preprocessing/index_lookup.py +19 -1
- keras/src/layers/preprocessing/normalization.py +16 -1
- keras/src/layers/preprocessing/string_lookup.py +26 -28
- keras/src/layers/regularization/dropout.py +43 -1
- keras/src/layers/rnn/gru.py +1 -1
- keras/src/layers/rnn/lstm.py +2 -2
- keras/src/layers/rnn/rnn.py +19 -0
- keras/src/layers/rnn/simple_rnn.py +1 -1
- keras/src/legacy/preprocessing/image.py +4 -1
- keras/src/legacy/preprocessing/sequence.py +20 -12
- keras/src/losses/loss.py +1 -1
- keras/src/losses/losses.py +24 -0
- keras/src/metrics/confusion_metrics.py +7 -6
- keras/src/models/cloning.py +4 -0
- keras/src/models/functional.py +11 -3
- keras/src/models/model.py +195 -44
- keras/src/ops/image.py +257 -20
- keras/src/ops/linalg.py +93 -0
- keras/src/ops/nn.py +268 -2
- keras/src/ops/numpy.py +701 -44
- keras/src/ops/operation.py +90 -29
- keras/src/ops/operation_utils.py +2 -0
- keras/src/optimizers/adafactor.py +29 -10
- keras/src/optimizers/base_optimizer.py +22 -3
- keras/src/optimizers/loss_scale_optimizer.py +51 -18
- keras/src/optimizers/muon.py +65 -31
- keras/src/optimizers/schedules/learning_rate_schedule.py +4 -3
- keras/src/quantizers/__init__.py +14 -1
- keras/src/quantizers/awq.py +361 -0
- keras/src/quantizers/awq_config.py +140 -0
- keras/src/quantizers/awq_core.py +217 -0
- keras/src/quantizers/gptq.py +346 -207
- keras/src/quantizers/gptq_config.py +63 -13
- keras/src/quantizers/gptq_core.py +328 -215
- keras/src/quantizers/quantization_config.py +246 -0
- keras/src/quantizers/quantizers.py +407 -38
- keras/src/quantizers/utils.py +23 -0
- keras/src/random/seed_generator.py +6 -4
- keras/src/saving/file_editor.py +81 -6
- keras/src/saving/orbax_util.py +26 -0
- keras/src/saving/saving_api.py +37 -14
- keras/src/saving/saving_lib.py +1 -1
- keras/src/testing/__init__.py +1 -0
- keras/src/testing/test_case.py +45 -5
- keras/src/trainers/compile_utils.py +38 -17
- keras/src/trainers/data_adapters/grain_dataset_adapter.py +1 -5
- keras/src/tree/torchtree_impl.py +215 -0
- keras/src/tree/tree_api.py +6 -1
- keras/src/utils/backend_utils.py +31 -4
- keras/src/utils/dataset_utils.py +234 -35
- keras/src/utils/file_utils.py +49 -11
- keras/src/utils/image_utils.py +14 -2
- keras/src/utils/jax_layer.py +244 -55
- keras/src/utils/module_utils.py +29 -0
- keras/src/utils/progbar.py +10 -12
- keras/src/utils/python_utils.py +5 -0
- keras/src/utils/rng_utils.py +9 -1
- keras/src/utils/tracking.py +70 -5
- keras/src/version.py +1 -1
- {keras_nightly-3.12.0.dev2025083103.dist-info → keras_nightly-3.14.0.dev2026011604.dist-info}/METADATA +16 -6
- {keras_nightly-3.12.0.dev2025083103.dist-info → keras_nightly-3.14.0.dev2026011604.dist-info}/RECORD +163 -142
- keras/src/quantizers/gptq_quant.py +0 -133
- {keras_nightly-3.12.0.dev2025083103.dist-info → keras_nightly-3.14.0.dev2026011604.dist-info}/WHEEL +0 -0
- {keras_nightly-3.12.0.dev2025083103.dist-info → keras_nightly-3.14.0.dev2026011604.dist-info}/top_level.txt +0 -0
keras/src/ops/numpy.py
CHANGED
|
@@ -301,33 +301,6 @@ def all(x, axis=None, keepdims=False):
|
|
|
301
301
|
return backend.numpy.all(x, axis=axis, keepdims=keepdims)
|
|
302
302
|
|
|
303
303
|
|
|
304
|
-
class Any(Operation):
|
|
305
|
-
def __init__(self, axis=None, keepdims=False, *, name=None):
|
|
306
|
-
super().__init__(name=name)
|
|
307
|
-
if isinstance(axis, int):
|
|
308
|
-
self.axis = [axis]
|
|
309
|
-
else:
|
|
310
|
-
self.axis = axis
|
|
311
|
-
self.keepdims = keepdims
|
|
312
|
-
|
|
313
|
-
def call(self, x):
|
|
314
|
-
return backend.numpy.any(
|
|
315
|
-
x,
|
|
316
|
-
axis=self.axis,
|
|
317
|
-
keepdims=self.keepdims,
|
|
318
|
-
)
|
|
319
|
-
|
|
320
|
-
def compute_output_spec(self, x):
|
|
321
|
-
return KerasTensor(
|
|
322
|
-
reduce_shape(
|
|
323
|
-
x.shape,
|
|
324
|
-
axis=self.axis,
|
|
325
|
-
keepdims=self.keepdims,
|
|
326
|
-
),
|
|
327
|
-
dtype="bool",
|
|
328
|
-
)
|
|
329
|
-
|
|
330
|
-
|
|
331
304
|
class Angle(Operation):
|
|
332
305
|
def call(self, x):
|
|
333
306
|
return backend.numpy.angle(x)
|
|
@@ -363,6 +336,33 @@ def angle(x):
|
|
|
363
336
|
return backend.numpy.angle(x)
|
|
364
337
|
|
|
365
338
|
|
|
339
|
+
class Any(Operation):
|
|
340
|
+
def __init__(self, axis=None, keepdims=False, *, name=None):
|
|
341
|
+
super().__init__(name=name)
|
|
342
|
+
if isinstance(axis, int):
|
|
343
|
+
self.axis = [axis]
|
|
344
|
+
else:
|
|
345
|
+
self.axis = axis
|
|
346
|
+
self.keepdims = keepdims
|
|
347
|
+
|
|
348
|
+
def call(self, x):
|
|
349
|
+
return backend.numpy.any(
|
|
350
|
+
x,
|
|
351
|
+
axis=self.axis,
|
|
352
|
+
keepdims=self.keepdims,
|
|
353
|
+
)
|
|
354
|
+
|
|
355
|
+
def compute_output_spec(self, x):
|
|
356
|
+
return KerasTensor(
|
|
357
|
+
reduce_shape(
|
|
358
|
+
x.shape,
|
|
359
|
+
axis=self.axis,
|
|
360
|
+
keepdims=self.keepdims,
|
|
361
|
+
),
|
|
362
|
+
dtype="bool",
|
|
363
|
+
)
|
|
364
|
+
|
|
365
|
+
|
|
366
366
|
@keras_export(["keras.ops.any", "keras.ops.numpy.any"])
|
|
367
367
|
def any(x, axis=None, keepdims=False):
|
|
368
368
|
"""Test whether any array element along a given axis evaluates to `True`.
|
|
@@ -595,27 +595,28 @@ class Arange(Operation):
|
|
|
595
595
|
super().__init__(name=name)
|
|
596
596
|
self.dtype = None if dtype is None else backend.standardize_dtype(dtype)
|
|
597
597
|
|
|
598
|
-
def call(self, start, stop=None, step=
|
|
598
|
+
def call(self, start, stop=None, step=None):
|
|
599
599
|
return backend.numpy.arange(start, stop, step=step, dtype=self.dtype)
|
|
600
600
|
|
|
601
|
-
def compute_output_spec(self, start, stop=None, step=
|
|
601
|
+
def compute_output_spec(self, start, stop=None, step=None):
|
|
602
602
|
if stop is None:
|
|
603
603
|
start, stop = 0, start
|
|
604
|
+
if step is None:
|
|
605
|
+
step = 1
|
|
604
606
|
output_shape = [int(np.ceil((stop - start) / step))]
|
|
605
607
|
dtype = self.dtype
|
|
606
608
|
if dtype is None:
|
|
607
|
-
dtypes_to_resolve = [
|
|
608
|
-
getattr(start, "dtype", type(start)),
|
|
609
|
-
getattr(step, "dtype", type(step)),
|
|
610
|
-
]
|
|
609
|
+
dtypes_to_resolve = [getattr(start, "dtype", type(start))]
|
|
611
610
|
if stop is not None:
|
|
612
611
|
dtypes_to_resolve.append(getattr(stop, "dtype", type(stop)))
|
|
612
|
+
if step is not None:
|
|
613
|
+
dtypes_to_resolve.append(getattr(step, "dtype", type(step)))
|
|
613
614
|
dtype = dtypes.result_type(*dtypes_to_resolve)
|
|
614
615
|
return KerasTensor(output_shape, dtype=dtype)
|
|
615
616
|
|
|
616
617
|
|
|
617
618
|
@keras_export(["keras.ops.arange", "keras.ops.numpy.arange"])
|
|
618
|
-
def arange(start, stop=None, step=
|
|
619
|
+
def arange(start, stop=None, step=None, dtype=None):
|
|
619
620
|
"""Return evenly spaced values within a given interval.
|
|
620
621
|
|
|
621
622
|
`arange` can be called with a varying number of positional arguments:
|
|
@@ -923,6 +924,11 @@ def arctanh(x):
|
|
|
923
924
|
|
|
924
925
|
Returns:
|
|
925
926
|
Output tensor of same shape as `x`.
|
|
927
|
+
|
|
928
|
+
Example:
|
|
929
|
+
>>> x = keras.ops.convert_to_tensor([0, -0.5])
|
|
930
|
+
>>> keras.ops.arctanh(x)
|
|
931
|
+
array([ 0. , -0.54930615], dtype=float32)
|
|
926
932
|
"""
|
|
927
933
|
if any_symbolic_tensors((x,)):
|
|
928
934
|
return Arctanh().symbolic_call(x)
|
|
@@ -1123,6 +1129,68 @@ def array(x, dtype=None):
|
|
|
1123
1129
|
return backend.numpy.array(x, dtype=dtype)
|
|
1124
1130
|
|
|
1125
1131
|
|
|
1132
|
+
class View(Operation):
|
|
1133
|
+
def __init__(self, dtype=None, *, name=None):
|
|
1134
|
+
super().__init__(name=name)
|
|
1135
|
+
self.dtype = None if dtype is None else backend.standardize_dtype(dtype)
|
|
1136
|
+
|
|
1137
|
+
def call(self, x):
|
|
1138
|
+
return backend.numpy.view(x, dtype=self.dtype)
|
|
1139
|
+
|
|
1140
|
+
def compute_output_spec(self, x):
|
|
1141
|
+
old_dtype = backend.standardize_dtype(x.dtype)
|
|
1142
|
+
new_dtype = backend.standardize_dtype(
|
|
1143
|
+
self.dtype if self.dtype else x.dtype
|
|
1144
|
+
)
|
|
1145
|
+
|
|
1146
|
+
old_itemsize = np.dtype(old_dtype).itemsize
|
|
1147
|
+
new_itemsize = np.dtype(new_dtype).itemsize
|
|
1148
|
+
|
|
1149
|
+
if old_itemsize == new_itemsize:
|
|
1150
|
+
return KerasTensor(x.shape, dtype=new_dtype)
|
|
1151
|
+
|
|
1152
|
+
if not x.shape:
|
|
1153
|
+
raise ValueError(
|
|
1154
|
+
"Cannot view a scalar as a different dtype if item sizes "
|
|
1155
|
+
"are different."
|
|
1156
|
+
)
|
|
1157
|
+
|
|
1158
|
+
output_shape = list(x.shape)
|
|
1159
|
+
if output_shape[-1] is not None:
|
|
1160
|
+
if (output_shape[-1] * old_itemsize) % new_itemsize != 0:
|
|
1161
|
+
raise ValueError(
|
|
1162
|
+
f"Cannot view array of shape {x.shape} and dtype {x.dtype} "
|
|
1163
|
+
f"as dtype {new_dtype} because the total number of bytes "
|
|
1164
|
+
"is not divisible by the new itemsize."
|
|
1165
|
+
)
|
|
1166
|
+
output_shape[-1] = output_shape[-1] * old_itemsize // new_itemsize
|
|
1167
|
+
return KerasTensor(tuple(output_shape), dtype=new_dtype)
|
|
1168
|
+
|
|
1169
|
+
|
|
1170
|
+
@keras_export(["keras.ops.view", "keras.ops.numpy.view"])
|
|
1171
|
+
def view(x, dtype=None):
|
|
1172
|
+
"""Create a new bitwise view of the same data with the specified dtype.
|
|
1173
|
+
|
|
1174
|
+
Args:
|
|
1175
|
+
x: Input tensor.
|
|
1176
|
+
dtype: Data-type descriptor of the returned view,
|
|
1177
|
+
e.g., float32 or int16.
|
|
1178
|
+
|
|
1179
|
+
Returns:
|
|
1180
|
+
View of a tensor with data type dtype.
|
|
1181
|
+
|
|
1182
|
+
Examples:
|
|
1183
|
+
>>> x = keras.ops.array([1, 2, 3])
|
|
1184
|
+
>>> x
|
|
1185
|
+
array([1, 2, 3], dtype=int32)
|
|
1186
|
+
>>> keras.ops.view(x, dtype="float32")
|
|
1187
|
+
array([1.0e-45, 3.0e-45, 4.0e-45], dtype=float32)
|
|
1188
|
+
"""
|
|
1189
|
+
if any_symbolic_tensors((x,)):
|
|
1190
|
+
return View(dtype=dtype).symbolic_call(x)
|
|
1191
|
+
return backend.numpy.view(x, dtype=dtype)
|
|
1192
|
+
|
|
1193
|
+
|
|
1126
1194
|
class Average(Operation):
|
|
1127
1195
|
def __init__(self, axis=None, *, name=None):
|
|
1128
1196
|
super().__init__(name=name)
|
|
@@ -3051,6 +3119,48 @@ def empty(shape, dtype=None):
|
|
|
3051
3119
|
return backend.numpy.empty(shape, dtype=dtype)
|
|
3052
3120
|
|
|
3053
3121
|
|
|
3122
|
+
class EmptyLike(Operation):
|
|
3123
|
+
def __init__(self, dtype=None, *, name=None):
|
|
3124
|
+
super().__init__(name=name)
|
|
3125
|
+
self.dtype = None if dtype is None else backend.standardize_dtype(dtype)
|
|
3126
|
+
|
|
3127
|
+
def call(self, x):
|
|
3128
|
+
return backend.numpy.empty_like(x, dtype=self.dtype)
|
|
3129
|
+
|
|
3130
|
+
def compute_output_spec(self, x):
|
|
3131
|
+
dtype = (
|
|
3132
|
+
backend.standardize_dtype(x.dtype)
|
|
3133
|
+
if self.dtype is None
|
|
3134
|
+
else self.dtype
|
|
3135
|
+
)
|
|
3136
|
+
return KerasTensor(x.shape, dtype=dtype)
|
|
3137
|
+
|
|
3138
|
+
|
|
3139
|
+
@keras_export(["keras.ops.empty_like", "keras.ops.numpy.empty_like"])
|
|
3140
|
+
def empty_like(x, dtype=None):
|
|
3141
|
+
"""Return a new uninitialized tensor with the same shape and dtype as `x`.
|
|
3142
|
+
|
|
3143
|
+
Args:
|
|
3144
|
+
x: Input tensor to mimic shape and dtype.
|
|
3145
|
+
dtype: Optional data type. If None, uses `x.dtype`.
|
|
3146
|
+
|
|
3147
|
+
Returns:
|
|
3148
|
+
A tensor with the same shape and dtype as `x`, with arbitrary contents.
|
|
3149
|
+
|
|
3150
|
+
Example:
|
|
3151
|
+
>>> from keras import ops
|
|
3152
|
+
>>> x = ops.ones((2, 3), dtype="float32")
|
|
3153
|
+
>>> y = ops.empty_like(x)
|
|
3154
|
+
>>> y.shape
|
|
3155
|
+
(2, 3)
|
|
3156
|
+
>>> y.dtype
|
|
3157
|
+
dtype('float32')
|
|
3158
|
+
"""
|
|
3159
|
+
if any_symbolic_tensors((x,)):
|
|
3160
|
+
return EmptyLike(dtype=dtype).symbolic_call(x)
|
|
3161
|
+
return backend.numpy.empty_like(x, dtype=dtype)
|
|
3162
|
+
|
|
3163
|
+
|
|
3054
3164
|
class Equal(Operation):
|
|
3055
3165
|
def call(self, x1, x2):
|
|
3056
3166
|
return backend.numpy.equal(x1, x2)
|
|
@@ -3845,6 +3955,155 @@ def isposinf(x):
|
|
|
3845
3955
|
return backend.numpy.isposinf(x)
|
|
3846
3956
|
|
|
3847
3957
|
|
|
3958
|
+
class Isreal(Operation):
|
|
3959
|
+
def call(self, x):
|
|
3960
|
+
return backend.numpy.isreal(x)
|
|
3961
|
+
|
|
3962
|
+
def compute_output_spec(self, x):
|
|
3963
|
+
return KerasTensor(x.shape, dtype="bool")
|
|
3964
|
+
|
|
3965
|
+
|
|
3966
|
+
@keras_export(["keras.ops.isreal", "keras.ops.numpy.isreal"])
|
|
3967
|
+
def isreal(x):
|
|
3968
|
+
"""Test element-wise for real numbers.
|
|
3969
|
+
|
|
3970
|
+
Args:
|
|
3971
|
+
x: Input tensor.
|
|
3972
|
+
|
|
3973
|
+
Returns:
|
|
3974
|
+
Output boolean tensor.
|
|
3975
|
+
|
|
3976
|
+
Example:
|
|
3977
|
+
>>> from keras import ops
|
|
3978
|
+
>>> x = ops.array([1+1j, 1+0j, 4.5, 3, 2, 2j], dtype="complex64")
|
|
3979
|
+
>>> ops.isreal(x)
|
|
3980
|
+
array([False, True, True, True, True, False])
|
|
3981
|
+
"""
|
|
3982
|
+
if any_symbolic_tensors((x,)):
|
|
3983
|
+
return Isreal().symbolic_call(x)
|
|
3984
|
+
return backend.numpy.isreal(x)
|
|
3985
|
+
|
|
3986
|
+
|
|
3987
|
+
class Kron(Operation):
|
|
3988
|
+
def call(self, x1, x2):
|
|
3989
|
+
return backend.numpy.kron(x1, x2)
|
|
3990
|
+
|
|
3991
|
+
def compute_output_spec(self, x1, x2):
|
|
3992
|
+
x1_shape = getattr(x1, "shape", [])
|
|
3993
|
+
x2_shape = getattr(x2, "shape", [])
|
|
3994
|
+
|
|
3995
|
+
def _mul_shape_dim(a, b):
|
|
3996
|
+
if a is None or b is None:
|
|
3997
|
+
return None
|
|
3998
|
+
return a * b
|
|
3999
|
+
|
|
4000
|
+
output_shape = tuple(
|
|
4001
|
+
_mul_shape_dim(a, b) for a, b in zip(x1_shape, x2_shape)
|
|
4002
|
+
)
|
|
4003
|
+
|
|
4004
|
+
x1_type = backend.standardize_dtype(getattr(x1, "dtype", type(x1)))
|
|
4005
|
+
x2_type = backend.standardize_dtype(getattr(x2, "dtype", type(x2)))
|
|
4006
|
+
dtype = dtypes.result_type(x1_type, x2_type)
|
|
4007
|
+
return KerasTensor(output_shape, dtype=dtype)
|
|
4008
|
+
|
|
4009
|
+
|
|
4010
|
+
@keras_export(["keras.ops.kron", "keras.ops.numpy.kron"])
|
|
4011
|
+
def kron(x1, x2):
|
|
4012
|
+
"""Kronecker product of `x1` and `x2`.
|
|
4013
|
+
|
|
4014
|
+
Computes the Kronecker product of two input tensors. If `x1` has shape
|
|
4015
|
+
`(a0, a1, ..., an)` and `x2` has shape `(b0, b1, ..., bn)`, then the
|
|
4016
|
+
output will have shape `(a0*b0, a1*b1, ..., an*bn)`.
|
|
4017
|
+
|
|
4018
|
+
Args:
|
|
4019
|
+
x1: First input tensor.
|
|
4020
|
+
x2: Second input tensor.
|
|
4021
|
+
|
|
4022
|
+
Returns:
|
|
4023
|
+
A tensor representing the Kronecker product of `x1` and `x2`.
|
|
4024
|
+
"""
|
|
4025
|
+
if any_symbolic_tensors((x1, x2)):
|
|
4026
|
+
return Kron().symbolic_call(x1, x2)
|
|
4027
|
+
return backend.numpy.kron(x1, x2)
|
|
4028
|
+
|
|
4029
|
+
|
|
4030
|
+
class Lcm(Operation):
|
|
4031
|
+
def call(self, x1, x2):
|
|
4032
|
+
return backend.numpy.lcm(x1, x2)
|
|
4033
|
+
|
|
4034
|
+
def compute_output_spec(self, x1, x2):
|
|
4035
|
+
x1_shape = getattr(x1, "shape", [])
|
|
4036
|
+
x2_shape = getattr(x2, "shape", [])
|
|
4037
|
+
output_shape = broadcast_shapes(x1_shape, x2_shape)
|
|
4038
|
+
|
|
4039
|
+
x1_type = backend.standardize_dtype(getattr(x1, "dtype", type(x1)))
|
|
4040
|
+
x2_type = backend.standardize_dtype(getattr(x2, "dtype", type(x2)))
|
|
4041
|
+
dtype = dtypes.result_type(x1_type, x2_type)
|
|
4042
|
+
return KerasTensor(output_shape, dtype=dtype)
|
|
4043
|
+
|
|
4044
|
+
|
|
4045
|
+
@keras_export(["keras.ops.lcm", "keras.ops.numpy.lcm"])
|
|
4046
|
+
def lcm(x1, x2):
|
|
4047
|
+
"""Least common multiple of `x1` and `x2`, element-wise.
|
|
4048
|
+
|
|
4049
|
+
Args:
|
|
4050
|
+
x1: First input tensor (integer type).
|
|
4051
|
+
x2: Second input tensor (integer type).
|
|
4052
|
+
|
|
4053
|
+
Returns:
|
|
4054
|
+
Output tensor, element-wise least common multiple of `x1` and `x2`.
|
|
4055
|
+
|
|
4056
|
+
Example:
|
|
4057
|
+
>>> x1 = keras.ops.convert_to_tensor([2, 3, 4])
|
|
4058
|
+
>>> x2 = keras.ops.convert_to_tensor([5, 6, 7])
|
|
4059
|
+
>>> keras.ops.lcm(x1, x2)
|
|
4060
|
+
array([10, 6, 28], dtype=int32)
|
|
4061
|
+
"""
|
|
4062
|
+
if any_symbolic_tensors((x1, x2)):
|
|
4063
|
+
return Lcm().symbolic_call(x1, x2)
|
|
4064
|
+
return backend.numpy.lcm(x1, x2)
|
|
4065
|
+
|
|
4066
|
+
|
|
4067
|
+
class Ldexp(Operation):
|
|
4068
|
+
def call(self, x1, x2):
|
|
4069
|
+
return backend.numpy.ldexp(x1, x2)
|
|
4070
|
+
|
|
4071
|
+
def compute_output_spec(self, x1, x2):
|
|
4072
|
+
x1_shape = getattr(x1, "shape", [])
|
|
4073
|
+
x2_shape = getattr(x2, "shape", [])
|
|
4074
|
+
output_shape = broadcast_shapes(x1_shape, x2_shape)
|
|
4075
|
+
|
|
4076
|
+
x1_type = backend.standardize_dtype(getattr(x1, "dtype", type(x1)))
|
|
4077
|
+
x2_type = backend.standardize_dtype(getattr(x2, "dtype", type(x2)))
|
|
4078
|
+
dtype = dtypes.result_type(x1_type, x2_type, float)
|
|
4079
|
+
return KerasTensor(output_shape, dtype=dtype)
|
|
4080
|
+
|
|
4081
|
+
|
|
4082
|
+
@keras_export(["keras.ops.ldexp", "keras.ops.numpy.ldexp"])
|
|
4083
|
+
def ldexp(x1, x2):
|
|
4084
|
+
"""Multiply `x1` by 2 raised to the power of `x2`, element-wise.
|
|
4085
|
+
|
|
4086
|
+
This function computes:
|
|
4087
|
+
ldexp(x1, x2) = x1 * 2**x2
|
|
4088
|
+
|
|
4089
|
+
Args:
|
|
4090
|
+
x1: Float input tensor.
|
|
4091
|
+
x2: Integer exponent tensor.
|
|
4092
|
+
|
|
4093
|
+
Returns:
|
|
4094
|
+
Output tensor
|
|
4095
|
+
|
|
4096
|
+
Example:
|
|
4097
|
+
>>> x1 = keras.ops.convert_to_tensor([0.75, 1.5])
|
|
4098
|
+
>>> x2 = keras.ops.convert_to_tensor([1, 2])
|
|
4099
|
+
>>> keras.ops.ldexp(x1, x2)
|
|
4100
|
+
array([1.5, 6. ], dtype=float32)
|
|
4101
|
+
"""
|
|
4102
|
+
if any_symbolic_tensors((x1, x2)):
|
|
4103
|
+
return Ldexp().symbolic_call(x1, x2)
|
|
4104
|
+
return backend.numpy.ldexp(x1, x2)
|
|
4105
|
+
|
|
4106
|
+
|
|
3848
4107
|
class Less(Operation):
|
|
3849
4108
|
def call(self, x1, x2):
|
|
3850
4109
|
return backend.numpy.less(x1, x2)
|
|
@@ -4160,6 +4419,47 @@ def logaddexp(x1, x2):
|
|
|
4160
4419
|
return backend.numpy.logaddexp(x1, x2)
|
|
4161
4420
|
|
|
4162
4421
|
|
|
4422
|
+
class Logaddexp2(Operation):
|
|
4423
|
+
def call(self, x1, x2):
|
|
4424
|
+
return backend.numpy.logaddexp2(x1, x2)
|
|
4425
|
+
|
|
4426
|
+
def compute_output_spec(self, x1, x2):
|
|
4427
|
+
x1_shape = getattr(x1, "shape", [])
|
|
4428
|
+
x2_shape = getattr(x2, "shape", [])
|
|
4429
|
+
output_shape = broadcast_shapes(x1_shape, x2_shape)
|
|
4430
|
+
dtype = dtypes.result_type(
|
|
4431
|
+
getattr(x1, "dtype", type(x1)),
|
|
4432
|
+
getattr(x2, "dtype", type(x2)),
|
|
4433
|
+
float,
|
|
4434
|
+
)
|
|
4435
|
+
return KerasTensor(output_shape, dtype=dtype)
|
|
4436
|
+
|
|
4437
|
+
|
|
4438
|
+
@keras_export(["keras.ops.logaddexp2", "keras.ops.numpy.logaddexp2"])
|
|
4439
|
+
def logaddexp2(x1, x2):
|
|
4440
|
+
"""Base-2 logarithm of the sum of exponentiations of the inputs.
|
|
4441
|
+
|
|
4442
|
+
Calculates `log2(2**x1 + 2**x2)`.
|
|
4443
|
+
|
|
4444
|
+
Args:
|
|
4445
|
+
x1: Input tensor.
|
|
4446
|
+
x2: Input tensor.
|
|
4447
|
+
|
|
4448
|
+
Returns:
|
|
4449
|
+
Output tensor, element-wise log base 2 of the sum of 2**x1 and 2**x2.
|
|
4450
|
+
|
|
4451
|
+
Example:
|
|
4452
|
+
>>> from keras import ops
|
|
4453
|
+
>>> x1 = ops.array([1, 2, 3])
|
|
4454
|
+
>>> x2 = ops.array([1, 2, 3])
|
|
4455
|
+
>>> ops.logaddexp2(x1, x2)
|
|
4456
|
+
array([2., 3., 4.], dtype=float32)
|
|
4457
|
+
"""
|
|
4458
|
+
if any_symbolic_tensors((x1, x2)):
|
|
4459
|
+
return Logaddexp2().symbolic_call(x1, x2)
|
|
4460
|
+
return backend.numpy.logaddexp2(x1, x2)
|
|
4461
|
+
|
|
4462
|
+
|
|
4163
4463
|
class LogicalAnd(Operation):
|
|
4164
4464
|
def call(self, x1, x2):
|
|
4165
4465
|
return backend.numpy.logical_and(x1, x2)
|
|
@@ -5156,6 +5456,74 @@ def prod(x, axis=None, keepdims=False, dtype=None):
|
|
|
5156
5456
|
return backend.numpy.prod(x, axis=axis, keepdims=keepdims, dtype=dtype)
|
|
5157
5457
|
|
|
5158
5458
|
|
|
5459
|
+
class Ptp(Operation):
|
|
5460
|
+
def __init__(self, axis=None, keepdims=False, *, name=None):
|
|
5461
|
+
super().__init__(name=name)
|
|
5462
|
+
self.axis = axis
|
|
5463
|
+
self.keepdims = keepdims
|
|
5464
|
+
|
|
5465
|
+
def call(self, x):
|
|
5466
|
+
return backend.numpy.ptp(
|
|
5467
|
+
x,
|
|
5468
|
+
axis=self.axis,
|
|
5469
|
+
keepdims=self.keepdims,
|
|
5470
|
+
)
|
|
5471
|
+
|
|
5472
|
+
def compute_output_spec(self, x):
|
|
5473
|
+
dtype = backend.standardize_dtype(x.dtype)
|
|
5474
|
+
return KerasTensor(
|
|
5475
|
+
reduce_shape(x.shape, axis=self.axis, keepdims=self.keepdims),
|
|
5476
|
+
dtype=dtype,
|
|
5477
|
+
)
|
|
5478
|
+
|
|
5479
|
+
|
|
5480
|
+
@keras_export(["keras.ops.ptp", "keras.ops.numpy.ptp"])
|
|
5481
|
+
def ptp(x, axis=None, keepdims=False):
|
|
5482
|
+
"""Return the peak-to-peak (max - min) value of tensor elements
|
|
5483
|
+
over a given axis.
|
|
5484
|
+
|
|
5485
|
+
The peak-to-peak value is defined as the difference between the
|
|
5486
|
+
maximum and minimum values along the specified axis.
|
|
5487
|
+
|
|
5488
|
+
Args:
|
|
5489
|
+
x: Input tensor.
|
|
5490
|
+
axis: Axis or axes along which the peak-to-peak value is computed.
|
|
5491
|
+
The default, `axis=None`, will compute the peak-to-peak value
|
|
5492
|
+
over all elements in the input tensor.
|
|
5493
|
+
keepdims: If this is set to `True`, the axes which are reduced
|
|
5494
|
+
are left in the result as dimensions with size one.
|
|
5495
|
+
|
|
5496
|
+
Returns:
|
|
5497
|
+
A tensor containing the peak-to-peak values of `x` over the
|
|
5498
|
+
given axis or axes.
|
|
5499
|
+
|
|
5500
|
+
Examples:
|
|
5501
|
+
>>> x = keras.ops.array([[1., 3., 2.],
|
|
5502
|
+
... [4., 0., 5.]])
|
|
5503
|
+
|
|
5504
|
+
>>> # Peak-to-peak over all elements
|
|
5505
|
+
>>> keras.ops.ptp(x)
|
|
5506
|
+
5.0
|
|
5507
|
+
|
|
5508
|
+
>>> # Peak-to-peak along axis 1
|
|
5509
|
+
>>> keras.ops.ptp(x, axis=1)
|
|
5510
|
+
array([2., 5.], dtype=float32)
|
|
5511
|
+
|
|
5512
|
+
>>> # Peak-to-peak over multiple axes
|
|
5513
|
+
>>> x = keras.ops.reshape(x, (1, 2, 3))
|
|
5514
|
+
>>> keras.ops.ptp(x, axis=(1, 2))
|
|
5515
|
+
array([5.], dtype=float32)
|
|
5516
|
+
|
|
5517
|
+
>>> # Keep reduced dimensions
|
|
5518
|
+
>>> keras.ops.ptp(x, axis=2, keepdims=True)
|
|
5519
|
+
array([[[2.],
|
|
5520
|
+
[5.]]], dtype=float32)
|
|
5521
|
+
"""
|
|
5522
|
+
if any_symbolic_tensors((x,)):
|
|
5523
|
+
return Ptp(axis=axis, keepdims=keepdims).symbolic_call(x)
|
|
5524
|
+
return backend.numpy.ptp(x, axis=axis, keepdims=keepdims)
|
|
5525
|
+
|
|
5526
|
+
|
|
5159
5527
|
class Quantile(Operation):
|
|
5160
5528
|
def __init__(
|
|
5161
5529
|
self, axis=None, method="linear", keepdims=False, *, name=None
|
|
@@ -5296,10 +5664,10 @@ def unravel_index(indices, shape):
|
|
|
5296
5664
|
Tuple of arrays for each dimension with unraveled indices.
|
|
5297
5665
|
|
|
5298
5666
|
Example:
|
|
5299
|
-
|
|
5300
|
-
|
|
5301
|
-
|
|
5302
|
-
|
|
5667
|
+
>>> indices = 5
|
|
5668
|
+
>>> shape = (3, 3)
|
|
5669
|
+
>>> unravel_index(indices, shape)
|
|
5670
|
+
(1, 2) # 5 is at row 1, column 2 in a 3x3 array
|
|
5303
5671
|
"""
|
|
5304
5672
|
if any_symbolic_tensors((indices,)):
|
|
5305
5673
|
return UnravelIndex(shape).symbolic_call(indices)
|
|
@@ -5546,9 +5914,11 @@ class SearchSorted(Operation):
|
|
|
5546
5914
|
"searchsorted only supports 1-D sorted sequences. Use"
|
|
5547
5915
|
"keras.ops.vectorized_map to extend to N-D sequences."
|
|
5548
5916
|
)
|
|
5917
|
+
sequence_len = sorted_sequence.shape[0]
|
|
5549
5918
|
out_type = (
|
|
5550
5919
|
"int32"
|
|
5551
|
-
if
|
|
5920
|
+
if sequence_len is not None
|
|
5921
|
+
and sequence_len <= np.iinfo(np.int32).max
|
|
5552
5922
|
else "int64"
|
|
5553
5923
|
)
|
|
5554
5924
|
return KerasTensor(values.shape, dtype=out_type)
|
|
@@ -6154,6 +6524,9 @@ class Tile(Operation):
|
|
|
6154
6524
|
repeats = self.repeats
|
|
6155
6525
|
if isinstance(repeats, int):
|
|
6156
6526
|
repeats = [repeats]
|
|
6527
|
+
else:
|
|
6528
|
+
repeats = list(repeats)
|
|
6529
|
+
|
|
6157
6530
|
if len(x_shape) > len(repeats):
|
|
6158
6531
|
repeats = [1] * (len(x_shape) - len(repeats)) + repeats
|
|
6159
6532
|
else:
|
|
@@ -6161,10 +6534,10 @@ class Tile(Operation):
|
|
|
6161
6534
|
|
|
6162
6535
|
output_shape = []
|
|
6163
6536
|
for x_size, repeat in zip(x_shape, repeats):
|
|
6164
|
-
if x_size
|
|
6165
|
-
output_shape.append(None)
|
|
6166
|
-
else:
|
|
6537
|
+
if isinstance(x_size, int):
|
|
6167
6538
|
output_shape.append(x_size * repeat)
|
|
6539
|
+
else:
|
|
6540
|
+
output_shape.append(None)
|
|
6168
6541
|
return KerasTensor(output_shape, dtype=x.dtype)
|
|
6169
6542
|
|
|
6170
6543
|
|
|
@@ -6212,8 +6585,13 @@ class Trace(Operation):
|
|
|
6212
6585
|
x_shape[self.axis2] = -1
|
|
6213
6586
|
output_shape = list(filter((-1).__ne__, x_shape))
|
|
6214
6587
|
output_dtype = backend.standardize_dtype(x.dtype)
|
|
6215
|
-
if output_dtype
|
|
6216
|
-
output_dtype =
|
|
6588
|
+
if output_dtype in ("bool", "int8", "int16"):
|
|
6589
|
+
output_dtype = "int32"
|
|
6590
|
+
elif output_dtype in ("uint8", "uint16"):
|
|
6591
|
+
output_dtype = "uint32"
|
|
6592
|
+
if output_dtype == "uint32" and backend.backend() == "torch":
|
|
6593
|
+
# Torch backend doesn't support uint32 dtype.
|
|
6594
|
+
output_dtype = "int32"
|
|
6217
6595
|
return KerasTensor(output_shape, dtype=output_dtype)
|
|
6218
6596
|
|
|
6219
6597
|
|
|
@@ -6794,6 +7172,49 @@ def negative(x):
|
|
|
6794
7172
|
return backend.numpy.negative(x)
|
|
6795
7173
|
|
|
6796
7174
|
|
|
7175
|
+
class Nextafter(Operation):
|
|
7176
|
+
def call(self, x1, x2):
|
|
7177
|
+
return backend.numpy.nextafter(x1, x2)
|
|
7178
|
+
|
|
7179
|
+
def compute_output_spec(self, x1, x2):
|
|
7180
|
+
x1_shape = getattr(x1, "shape", [])
|
|
7181
|
+
x2_shape = getattr(x2, "shape", [])
|
|
7182
|
+
output_shape = broadcast_shapes(x1_shape, x2_shape)
|
|
7183
|
+
|
|
7184
|
+
x1_type = backend.standardize_dtype(getattr(x1, "dtype", type(x1)))
|
|
7185
|
+
x2_type = backend.standardize_dtype(getattr(x2, "dtype", type(x2)))
|
|
7186
|
+
dtype = dtypes.result_type(x1_type, x2_type, float)
|
|
7187
|
+
return KerasTensor(output_shape, dtype=dtype)
|
|
7188
|
+
|
|
7189
|
+
|
|
7190
|
+
@keras_export(["keras.ops.nextafter", "keras.ops.numpy.nextafter"])
|
|
7191
|
+
def nextafter(x1, x2):
|
|
7192
|
+
"""
|
|
7193
|
+
Return the next representable floating-point value after `x1` towards `x2`.
|
|
7194
|
+
|
|
7195
|
+
This function computes the next floating-point value
|
|
7196
|
+
following `x1` in the direction of `x2`, element-wise.
|
|
7197
|
+
|
|
7198
|
+
Args:
|
|
7199
|
+
x1: Input tensor whose values will be moved to the next
|
|
7200
|
+
representable floating-point value.
|
|
7201
|
+
x2: Input tensor indicating the direction toward which
|
|
7202
|
+
`x1` is moved.
|
|
7203
|
+
|
|
7204
|
+
Returns:
|
|
7205
|
+
Output tensor
|
|
7206
|
+
|
|
7207
|
+
Example:
|
|
7208
|
+
>>> x1 = keras.ops.convert_to_tensor([1.0, 1.0])
|
|
7209
|
+
>>> x2 = keras.ops.convert_to_tensor([2.0, 0.0])
|
|
7210
|
+
>>> keras.ops.nextafter(x1, x2)
|
|
7211
|
+
array([1.0000001, 0.99999994], dtype=float32)
|
|
7212
|
+
"""
|
|
7213
|
+
if any_symbolic_tensors((x1, x2)):
|
|
7214
|
+
return Nextafter().symbolic_call(x1, x2)
|
|
7215
|
+
return backend.numpy.nextafter(x1, x2)
|
|
7216
|
+
|
|
7217
|
+
|
|
6797
7218
|
class Square(Operation):
|
|
6798
7219
|
def call(self, x):
|
|
6799
7220
|
return backend.numpy.square(x)
|
|
@@ -6930,6 +7351,48 @@ def transpose(x, axes=None):
|
|
|
6930
7351
|
return backend.numpy.transpose(x, axes=axes)
|
|
6931
7352
|
|
|
6932
7353
|
|
|
7354
|
+
class Trapezoid(Operation):
    """Symbolic op wrapping `backend.numpy.trapezoid`."""

    def __init__(self, x=None, dx=1.0, axis=-1, *, name=None):
        super().__init__(name=name)
        self.x = x
        self.dx = dx
        self.axis = axis

    def call(self, y):
        return backend.numpy.trapezoid(y, x=self.x, dx=self.dx, axis=self.axis)

    def compute_output_spec(self, y):
        # Integration reduces away the integrated axis.
        result_shape = list(y.shape)
        if self.axis is not None and result_shape:
            del result_shape[self.axis % len(result_shape)]
        # The integral is always computed in floating point.
        out_dtype = backend.result_type(getattr(y, "dtype", type(y)), float)
        return KerasTensor(tuple(result_shape), dtype=out_dtype)
|
|
7370
|
+
|
|
7371
|
+
|
|
7372
|
+
@keras_export(["keras.ops.trapezoid", "keras.ops.numpy.trapezoid"])
def trapezoid(y, x=None, dx=1.0, axis=-1):
    """Integrate along the given axis using the composite trapezoidal rule.

    Args:
        y: Input tensor.
        x: Optional tensor specifying sample points corresponding to `y`.
            If `None`, spacing is assumed to be `dx`.
        dx: Spacing between sample points when `x` is `None`.
        axis: Axis along which to integrate. Default is the last axis.

    Returns:
        The approximate integral of `y` along the given axis.

    Example:
    >>> y = keras.ops.convert_to_tensor([[1, 2, 3], [4, 5, 6]])
    >>> keras.ops.trapezoid(y, axis=1)
    array([ 4., 10.], dtype=float32)
    """
    # Defer to the symbolic op when tracing; otherwise run eagerly.
    if any_symbolic_tensors((y,)):
        op = Trapezoid(x=x, dx=dx, axis=axis)
        return op.symbolic_call(y)
    return backend.numpy.trapezoid(y, x=x, dx=dx, axis=axis)
|
|
7394
|
+
|
|
7395
|
+
|
|
6933
7396
|
class Mean(Operation):
|
|
6934
7397
|
def __init__(self, axis=None, keepdims=False, *, name=None):
|
|
6935
7398
|
super().__init__(name=name)
|
|
@@ -6975,6 +7438,77 @@ def mean(x, axis=None, keepdims=False):
|
|
|
6975
7438
|
return backend.numpy.mean(x, axis=axis, keepdims=keepdims)
|
|
6976
7439
|
|
|
6977
7440
|
|
|
7441
|
+
class Vander(Operation):
    """Symbolic op wrapping `backend.numpy.vander`."""

    def __init__(self, N=None, increasing=False, *, name=None):
        super().__init__(name=name)
        self.N = N
        self.increasing = increasing

    def call(self, x):
        return backend.numpy.vander(x, self.N, self.increasing)

    def compute_output_spec(self, x):
        # When `N` is unspecified, the matrix is square: one column per
        # input element.
        num_columns = self.N if self.N is not None else x.shape[0]
        return KerasTensor(tuple(x.shape + (num_columns,)), dtype=x.dtype)
|
|
7458
|
+
|
|
7459
|
+
|
|
7460
|
+
@keras_export(["keras.ops.vander", "keras.ops.numpy.vander"])
def vander(x, N=None, increasing=False):
    """Generate a Vandermonde matrix.

    Args:
        x: 1D input tensor.
        N: Number of columns. If `None`, `N` = `len(x)`.
        increasing: Order of powers. If True, powers increase left to right.

    Returns:
        Output tensor, Vandermonde matrix of shape `(len(x), N)`.

    Example:
    >>> import numpy as np
    >>> import keras
    >>> x = np.array([1, 2, 3, 5])
    >>> keras.ops.vander(x)
    array([[  1,   1,   1,   1],
           [  8,   4,   2,   1],
           [ 27,   9,   3,   1],
           [125,  25,   5,   1]])
    """
    # Only 1-D inputs are supported, matching `np.vander`.
    if len(x.shape) != 1:
        raise ValueError(
            "Input tensor must be 1-dimensional. "
            f"Received: input.shape={x.shape}"
        )

    # Validate `N` eagerly so both symbolic and eager paths fail the same way.
    if N is not None:
        if not isinstance(N, int):
            raise TypeError(
                f"Argument `N` must be of type `int`. "
                f"Received: N={N} of type {type(N)}"
            )
        if N < 0:
            raise ValueError(
                f"Argument 'N' must be nonnegative. Received: N={N}"
            )

    if not isinstance(increasing, bool):
        raise TypeError(
            f"Argument `increasing` must be of type `bool`. "
            f"Received: increasing={increasing} of type {type(increasing)}"
        )

    # Defer to the symbolic op when tracing; otherwise run eagerly.
    if any_symbolic_tensors((x,)):
        return Vander(N=N, increasing=increasing).symbolic_call(x)
    return backend.numpy.vander(x, N=N, increasing=increasing)
|
|
7510
|
+
|
|
7511
|
+
|
|
6978
7512
|
class Var(Operation):
|
|
6979
7513
|
def __init__(self, axis=None, keepdims=False, *, name=None):
|
|
6980
7514
|
super().__init__(name=name)
|
|
@@ -7104,6 +7638,19 @@ def eye(N, M=None, k=0, dtype=None):
|
|
|
7104
7638
|
Returns:
|
|
7105
7639
|
Tensor with ones on the k-th diagonal and zeros elsewhere.
|
|
7106
7640
|
"""
|
|
7641
|
+
|
|
7642
|
+
def is_floating_type(v):
|
|
7643
|
+
return (
|
|
7644
|
+
isinstance(v, float)
|
|
7645
|
+
or getattr(v, "dtype", None) in dtypes.FLOAT_TYPES
|
|
7646
|
+
)
|
|
7647
|
+
|
|
7648
|
+
if is_floating_type(N):
|
|
7649
|
+
raise TypeError("Argument `N` must be an integer or an integer tensor.")
|
|
7650
|
+
if is_floating_type(M):
|
|
7651
|
+
raise TypeError(
|
|
7652
|
+
"Argument `M` must be an integer, an integer tensor, or `None`."
|
|
7653
|
+
)
|
|
7107
7654
|
return backend.numpy.eye(N, M=M, k=k, dtype=dtype)
|
|
7108
7655
|
|
|
7109
7656
|
|
|
@@ -7255,6 +7802,15 @@ def correlate(x1, x2, mode="valid"):
|
|
|
7255
7802
|
|
|
7256
7803
|
Returns:
|
|
7257
7804
|
Output tensor, cross-correlation of `x1` and `x2`.
|
|
7805
|
+
|
|
7806
|
+
Notes:
|
|
7807
|
+
Complex-valued inputs are currently not fully supported on the
|
|
7808
|
+
TensorFlow and PyTorch backends. When complex tensors are passed,
|
|
7809
|
+
they are cast to floating-point types and the imaginary component
|
|
7810
|
+
is discarded.
|
|
7811
|
+
|
|
7812
|
+
This behavior is documented for clarity and may change in the
|
|
7813
|
+
future. See discussion in issue #21617.
|
|
7258
7814
|
"""
|
|
7259
7815
|
if any_symbolic_tensors((x1, x2)):
|
|
7260
7816
|
return Correlate(mode=mode).symbolic_call(x1, x2)
|
|
@@ -7487,3 +8043,104 @@ def histogram(x, bins=10, range=None):
|
|
|
7487
8043
|
f"Received: input.shape={x.shape}"
|
|
7488
8044
|
)
|
|
7489
8045
|
return backend.numpy.histogram(x, bins=bins, range=range)
|
|
8046
|
+
|
|
8047
|
+
|
|
8048
|
+
class ArraySplit(Operation):
    """Symbolic op for `array_split`: splits a tensor into
    `indices_or_sections` sub-tensors along `axis`, allowing uneven splits.
    """

    def __init__(self, indices_or_sections, axis=0, *, name=None):
        super().__init__(name=name)
        # `array_split()` validates this is a positive int before
        # constructing the op.
        self.indices_or_sections = indices_or_sections
        self.axis = axis

    def call(self, x):
        return backend.numpy.array_split(
            x,
            indices_or_sections=self.indices_or_sections,
            axis=self.axis,
        )

    def compute_output_spec(self, x):
        num_splits = self.indices_or_sections

        # Normalize a negative axis to a non-negative index.
        axis = self.axis
        if axis < 0:
            axis += len(x.shape)

        total_size = x.shape[axis]
        base_shape = list(x.shape)

        if total_size is None:
            # Unknown split dimension: each output keeps an unknown size
            # along `axis`.
            base_shape[axis] = None
            return [
                KerasTensor(shape=tuple(base_shape), dtype=x.dtype)
                for _ in range(num_splits)
            ]

        # Uneven split rule: the first `remainder` outputs get one extra
        # element along `axis`.
        split_size, remainder = divmod(total_size, num_splits)
        output_specs = []
        for i in range(num_splits):
            shape = base_shape.copy()
            shape[axis] = split_size + (1 if i < remainder else 0)
            output_specs.append(KerasTensor(shape=tuple(shape), dtype=x.dtype))

        # Always return a list, matching the eager backends. (Previously the
        # unknown-size branch returned a tuple while this branch returned a
        # list — an inconsistent return type for the same op.)
        return output_specs
|
|
8093
|
+
|
|
8094
|
+
|
|
8095
|
+
@keras_export(["keras.ops.array_split", "keras.ops.numpy.array_split"])
def array_split(x, indices_or_sections, axis=0):
    """Splits an array into multiple sub-arrays (unevenly).

    This is similar to `keras.ops.split`, but it allows for
    unequal splits. `indices_or_sections` must be an integer
    that indicates the total number of sub-arrays to create.
    If the tensor cannot be divided evenly, the first `remainder`
    splits will have size `quotient + 1`, and the rest will
    have size `quotient`.

    Args:
        x: Input tensor.
        indices_or_sections: An integer indicating the number of
            sub-arrays to create.
        axis: The axis along which to split. Defaults to 0.

    Returns:
        A list of sub-tensors.

    Example:
    >>> x = keras.ops.arange(10)
    >>> keras.ops.array_split(x, 3)
    (array([0, 1, 2, 3], dtype=int32),
     array([4, 5, 6], dtype=int32),
     array([7, 8, 9], dtype=int32))
    """
    # Validate arguments up front so both tracing and eager calls fail
    # identically.
    if not isinstance(indices_or_sections, int):
        raise TypeError(
            "Argument `indices_or_sections` must be of type `int`. "
            f"Received: indices_or_sections={indices_or_sections}"
        )
    if indices_or_sections <= 0:
        raise ValueError(
            "Argument `indices_or_sections` must be a positive integer. "
            f"Received: indices_or_sections={indices_or_sections}"
        )
    if not isinstance(axis, int):
        raise TypeError(
            f"Argument `axis` must be of type `int`. Received: {axis}"
        )

    if any_symbolic_tensors((x,)):
        op = ArraySplit(indices_or_sections=indices_or_sections, axis=axis)
        return op.symbolic_call(x)
    return backend.numpy.array_split(
        x, indices_or_sections=indices_or_sections, axis=axis
    )
|