keras-nightly 3.14.0.dev2026010104__py3-none-any.whl → 3.14.0.dev2026012204__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- keras/_tf_keras/keras/dtype_policies/__init__.py +3 -0
- keras/_tf_keras/keras/ops/__init__.py +2 -0
- keras/_tf_keras/keras/ops/numpy/__init__.py +2 -0
- keras/_tf_keras/keras/quantizers/__init__.py +1 -0
- keras/dtype_policies/__init__.py +3 -0
- keras/ops/__init__.py +2 -0
- keras/ops/numpy/__init__.py +2 -0
- keras/quantizers/__init__.py +1 -0
- keras/src/backend/jax/nn.py +26 -9
- keras/src/backend/jax/numpy.py +10 -0
- keras/src/backend/numpy/numpy.py +15 -0
- keras/src/backend/openvino/numpy.py +338 -17
- keras/src/backend/tensorflow/numpy.py +24 -1
- keras/src/backend/tensorflow/rnn.py +17 -7
- keras/src/backend/torch/numpy.py +26 -0
- keras/src/backend/torch/rnn.py +28 -11
- keras/src/callbacks/orbax_checkpoint.py +75 -42
- keras/src/dtype_policies/__init__.py +2 -0
- keras/src/dtype_policies/dtype_policy.py +90 -1
- keras/src/layers/core/dense.py +122 -6
- keras/src/layers/core/einsum_dense.py +151 -7
- keras/src/layers/core/embedding.py +1 -1
- keras/src/layers/core/reversible_embedding.py +10 -1
- keras/src/layers/layer.py +5 -0
- keras/src/layers/preprocessing/feature_space.py +8 -4
- keras/src/layers/preprocessing/image_preprocessing/aug_mix.py +2 -2
- keras/src/layers/preprocessing/image_preprocessing/center_crop.py +13 -15
- keras/src/layers/preprocessing/image_preprocessing/random_contrast.py +3 -3
- keras/src/layers/preprocessing/image_preprocessing/resizing.py +10 -0
- keras/src/losses/losses.py +24 -0
- keras/src/models/model.py +18 -9
- keras/src/ops/image.py +106 -93
- keras/src/ops/numpy.py +138 -0
- keras/src/quantizers/__init__.py +2 -0
- keras/src/quantizers/awq.py +361 -0
- keras/src/quantizers/awq_config.py +140 -0
- keras/src/quantizers/awq_core.py +217 -0
- keras/src/quantizers/gptq.py +1 -2
- keras/src/quantizers/gptq_core.py +1 -1
- keras/src/quantizers/quantization_config.py +14 -0
- keras/src/quantizers/quantizers.py +61 -52
- keras/src/random/seed_generator.py +2 -2
- keras/src/saving/orbax_util.py +50 -0
- keras/src/saving/saving_api.py +37 -14
- keras/src/utils/jax_layer.py +69 -31
- keras/src/utils/module_utils.py +11 -0
- keras/src/utils/tracking.py +5 -5
- keras/src/version.py +1 -1
- {keras_nightly-3.14.0.dev2026010104.dist-info → keras_nightly-3.14.0.dev2026012204.dist-info}/METADATA +1 -1
- {keras_nightly-3.14.0.dev2026010104.dist-info → keras_nightly-3.14.0.dev2026012204.dist-info}/RECORD +52 -48
- {keras_nightly-3.14.0.dev2026010104.dist-info → keras_nightly-3.14.0.dev2026012204.dist-info}/WHEEL +1 -1
- {keras_nightly-3.14.0.dev2026010104.dist-info → keras_nightly-3.14.0.dev2026012204.dist-info}/top_level.txt +0 -0
keras/_tf_keras/keras/dtype_policies/__init__.py
CHANGED
@@ -7,6 +7,9 @@ since your modifications would be overwritten.
 from keras.src.dtype_policies import deserialize as deserialize
 from keras.src.dtype_policies import get as get
 from keras.src.dtype_policies import serialize as serialize
+from keras.src.dtype_policies.dtype_policy import (
+    AWQDTypePolicy as AWQDTypePolicy,
+)
 from keras.src.dtype_policies.dtype_policy import DTypePolicy as DTypePolicy
 from keras.src.dtype_policies.dtype_policy import (
     FloatDTypePolicy as FloatDTypePolicy,
keras/_tf_keras/keras/ops/__init__.py
CHANGED
@@ -245,6 +245,7 @@ from keras.src.ops.numpy import mod as mod
 from keras.src.ops.numpy import moveaxis as moveaxis
 from keras.src.ops.numpy import multiply as multiply
 from keras.src.ops.numpy import nan_to_num as nan_to_num
+from keras.src.ops.numpy import nansum as nansum
 from keras.src.ops.numpy import ndim as ndim
 from keras.src.ops.numpy import negative as negative
 from keras.src.ops.numpy import nextafter as nextafter
@@ -256,6 +257,7 @@ from keras.src.ops.numpy import outer as outer
 from keras.src.ops.numpy import pad as pad
 from keras.src.ops.numpy import power as power
 from keras.src.ops.numpy import prod as prod
+from keras.src.ops.numpy import ptp as ptp
 from keras.src.ops.numpy import quantile as quantile
 from keras.src.ops.numpy import ravel as ravel
 from keras.src.ops.numpy import real as real
keras/_tf_keras/keras/ops/numpy/__init__.py
CHANGED
@@ -129,6 +129,7 @@ from keras.src.ops.numpy import mod as mod
 from keras.src.ops.numpy import moveaxis as moveaxis
 from keras.src.ops.numpy import multiply as multiply
 from keras.src.ops.numpy import nan_to_num as nan_to_num
+from keras.src.ops.numpy import nansum as nansum
 from keras.src.ops.numpy import ndim as ndim
 from keras.src.ops.numpy import negative as negative
 from keras.src.ops.numpy import nextafter as nextafter
@@ -140,6 +141,7 @@ from keras.src.ops.numpy import outer as outer
 from keras.src.ops.numpy import pad as pad
 from keras.src.ops.numpy import power as power
 from keras.src.ops.numpy import prod as prod
+from keras.src.ops.numpy import ptp as ptp
 from keras.src.ops.numpy import quantile as quantile
 from keras.src.ops.numpy import ravel as ravel
 from keras.src.ops.numpy import real as real
keras/_tf_keras/keras/quantizers/__init__.py
CHANGED
@@ -7,6 +7,7 @@ since your modifications would be overwritten.
 from keras.src.quantizers import deserialize as deserialize
 from keras.src.quantizers import get as get
 from keras.src.quantizers import serialize as serialize
+from keras.src.quantizers.awq_config import AWQConfig as AWQConfig
 from keras.src.quantizers.gptq_config import GPTQConfig as GPTQConfig
 from keras.src.quantizers.quantization_config import (
     Float8QuantizationConfig as Float8QuantizationConfig,
keras/dtype_policies/__init__.py
CHANGED
@@ -7,6 +7,9 @@ since your modifications would be overwritten.
 from keras.src.dtype_policies import deserialize as deserialize
 from keras.src.dtype_policies import get as get
 from keras.src.dtype_policies import serialize as serialize
+from keras.src.dtype_policies.dtype_policy import (
+    AWQDTypePolicy as AWQDTypePolicy,
+)
 from keras.src.dtype_policies.dtype_policy import DTypePolicy as DTypePolicy
 from keras.src.dtype_policies.dtype_policy import (
     FloatDTypePolicy as FloatDTypePolicy,
keras/ops/__init__.py
CHANGED
@@ -245,6 +245,7 @@ from keras.src.ops.numpy import mod as mod
 from keras.src.ops.numpy import moveaxis as moveaxis
 from keras.src.ops.numpy import multiply as multiply
 from keras.src.ops.numpy import nan_to_num as nan_to_num
+from keras.src.ops.numpy import nansum as nansum
 from keras.src.ops.numpy import ndim as ndim
 from keras.src.ops.numpy import negative as negative
 from keras.src.ops.numpy import nextafter as nextafter
@@ -256,6 +257,7 @@ from keras.src.ops.numpy import outer as outer
 from keras.src.ops.numpy import pad as pad
 from keras.src.ops.numpy import power as power
 from keras.src.ops.numpy import prod as prod
+from keras.src.ops.numpy import ptp as ptp
 from keras.src.ops.numpy import quantile as quantile
 from keras.src.ops.numpy import ravel as ravel
 from keras.src.ops.numpy import real as real
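The two new `keras.ops` exports follow NumPy semantics: `nansum` sums while treating NaN as zero, and `ptp` is the peak-to-peak range (max minus min). A minimal usage sketch against the public API, with illustrative values:

    import numpy as np
    from keras import ops

    x = np.array([[1.0, np.nan], [2.0, 4.0]])
    ops.nansum(x)          # 7.0 -- the NaN is treated as 0
    ops.nansum(x, axis=1)  # [1., 6.]

    y = np.array([2.0, 7.0, 4.0])
    ops.ptp(y)             # 5.0 -- max(y) - min(y)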
keras/ops/numpy/__init__.py
CHANGED
@@ -129,6 +129,7 @@ from keras.src.ops.numpy import mod as mod
 from keras.src.ops.numpy import moveaxis as moveaxis
 from keras.src.ops.numpy import multiply as multiply
 from keras.src.ops.numpy import nan_to_num as nan_to_num
+from keras.src.ops.numpy import nansum as nansum
 from keras.src.ops.numpy import ndim as ndim
 from keras.src.ops.numpy import negative as negative
 from keras.src.ops.numpy import nextafter as nextafter
@@ -140,6 +141,7 @@ from keras.src.ops.numpy import outer as outer
 from keras.src.ops.numpy import pad as pad
 from keras.src.ops.numpy import power as power
 from keras.src.ops.numpy import prod as prod
+from keras.src.ops.numpy import ptp as ptp
 from keras.src.ops.numpy import quantile as quantile
 from keras.src.ops.numpy import ravel as ravel
 from keras.src.ops.numpy import real as real
keras/quantizers/__init__.py
CHANGED
@@ -7,6 +7,7 @@ since your modifications would be overwritten.
 from keras.src.quantizers import deserialize as deserialize
 from keras.src.quantizers import get as get
 from keras.src.quantizers import serialize as serialize
+from keras.src.quantizers.awq_config import AWQConfig as AWQConfig
 from keras.src.quantizers.gptq_config import GPTQConfig as GPTQConfig
 from keras.src.quantizers.quantization_config import (
     Float8QuantizationConfig as Float8QuantizationConfig,
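Together with the `AWQDTypePolicy` export in `keras/dtype_policies/__init__.py` above, this makes the new AWQ (activation-aware weight quantization) surface publicly importable. Only the import paths are visible in this diff; `AWQConfig`'s constructor arguments are defined in the new `keras/src/quantizers/awq_config.py`, whose body is not reproduced here:

    # Import paths added in this release; constructor arguments live in
    # keras/src/quantizers/awq_config.py (not shown in this diff).
    from keras.quantizers import AWQConfig
    from keras.dtype_policies import AWQDTypePolicy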
keras/src/backend/jax/nn.py
CHANGED
@@ -1471,25 +1471,42 @@ def _can_use_flash_attention(query, key, value, bias, raise_error=False):
         # Only support at least Ampere
         if not check_compute_capability("8.0"):
             raise RuntimeError("Require at least Ampere arch to run")
-
+
+        # Inspect inputs of `check_layout`
         check_layout_params = list(
             inspect.signature(check_layout).parameters.keys()
         )
         for known_param in ("query", "key", "value", "bias", "layout"):
             check_layout_params.remove(known_param)
         # Defaults to `None` when not specified.
-        kwargs = {key: None for key in check_layout_params}
+        check_layout_kwargs = {key: None for key in check_layout_params}
         check_layout(
-            query, key, value, bias, layout=_normalize_layout("BTNH"), **kwargs
-        )
-        check_is_flash_attention(
             query,
             key,
-            layout=_normalize_layout("BTNH"),
-            cudnn_version=cudnn_version,
-            has_bias=bias is not None,
-            is_training=False,
+            value,
+            bias,
+            layout=_normalize_layout("BTNH"),
+            **check_layout_kwargs,
         )
+
+        # Inspect inputs of `check_is_flash_attention`
+        check_is_flash_attention_params = inspect.signature(
+            check_is_flash_attention
+        ).parameters
+        check_is_flash_attention_kwargs = {
+            "query": query,
+            "key": key,
+            "value": value,
+            "layout": _normalize_layout("BTNH"),
+            "cudnn_version": cudnn_version,
+            "has_bias": bias is not None,
+            "is_training": False,
+        }
+        # Remove unsupported arguments
+        for param in list(check_is_flash_attention_kwargs.keys()):
+            if param not in check_is_flash_attention_params:
+                check_is_flash_attention_kwargs.pop(param)
+        check_is_flash_attention(**check_is_flash_attention_kwargs)
         return True
     except:
         if raise_error:
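The rewritten guard builds a kwargs dict and filters it against `inspect.signature`, so the probe keeps working when JAX's private cuDNN helpers gain or lose parameters across versions. A self-contained sketch of that pattern (the names below are illustrative, not from the diff):

    import inspect

    def call_compatibly(fn, **candidate_kwargs):
        # Drop any keyword argument that `fn` does not accept, mirroring how
        # the hunk above adapts to differing `check_is_flash_attention`
        # signatures across JAX versions.
        accepted = inspect.signature(fn).parameters
        return fn(**{k: v for k, v in candidate_kwargs.items() if k in accepted})

    def newer_api(query, key, value, has_bias=False):
        return query, key, value, has_bias

    # `is_training` is silently dropped: `newer_api` does not take it.
    call_compatibly(newer_api, query=1, key=2, value=3, is_training=False)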
keras/src/backend/jax/numpy.py
CHANGED
@@ -1013,6 +1013,11 @@ def moveaxis(x, source, destination):
     return jnp.moveaxis(x, source=source, destination=destination)
 
 
+def nansum(x, axis=None, keepdims=False):
+    x = convert_to_tensor(x)
+    return jnp.nansum(x, axis=axis, keepdims=keepdims)
+
+
 def nan_to_num(x, nan=0.0, posinf=None, neginf=None):
     x = convert_to_tensor(x)
     return jnp.nan_to_num(x, nan=nan, posinf=posinf, neginf=neginf)
@@ -1063,6 +1068,11 @@ def prod(x, axis=None, keepdims=False, dtype=None):
     return jnp.prod(x, axis=axis, keepdims=keepdims, dtype=dtype)
 
 
+def ptp(x, axis=None, keepdims=False):
+    x = convert_to_tensor(x)
+    return jnp.ptp(x, axis=axis, keepdims=keepdims)
+
+
 def quantile(x, q, axis=None, method="linear", keepdims=False):
     x = convert_to_tensor(x)
     q = convert_to_tensor(q)
keras/src/backend/numpy/numpy.py
CHANGED
@@ -960,6 +960,17 @@ def moveaxis(x, source, destination):
     return np.moveaxis(x, source=source, destination=destination)
 
 
+def nansum(x, axis=None, keepdims=False):
+    axis = standardize_axis_for_numpy(axis)
+    dtype = standardize_dtype(x.dtype)
+
+    if dtype in ("bool", "int8", "int16"):
+        dtype = "int32"
+    elif dtype in ("uint8", "uint16"):
+        dtype = "uint32"
+    return np.nansum(x, axis=axis, keepdims=keepdims).astype(dtype)
+
+
 def nan_to_num(x, nan=0.0, posinf=None, neginf=None):
     return np.nan_to_num(x, nan=nan, posinf=posinf, neginf=neginf)
 
@@ -1018,6 +1029,10 @@ def prod(x, axis=None, keepdims=False, dtype=None):
     return np.prod(x, axis=axis, keepdims=keepdims, dtype=dtype)
 
 
+def ptp(x, axis=None, keepdims=False):
+    return np.ptp(x, axis=axis, keepdims=keepdims)
+
+
 def quantile(x, q, axis=None, method="linear", keepdims=False):
     axis = standardize_axis_for_numpy(axis)
     x = convert_to_tensor(x)
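The trailing `astype(dtype)` pins the result to the dtype Keras advertises: NumPy already accumulates small integer types in a wider dtype, and the wrapper then reports `int32` for `bool`/`int8`/`int16` inputs (`uint32` for `uint8`/`uint16`) instead of NumPy's platform-dependent default. A quick illustration:

    import numpy as np

    x = np.array([120, 120], dtype="int8")
    # np.nansum returns a platform-default integer dtype here; the backend
    # wrapper casts it to the int32 that Keras dtype rules promise.
    np.nansum(x).astype("int32")  # 240, dtype int32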
keras/src/backend/openvino/numpy.py
CHANGED
@@ -4,6 +4,7 @@ from openvino import Type
 
 from keras.src.backend import config
 from keras.src.backend.common import dtypes
+from keras.src.backend.common.backend_utils import canonicalize_axis
 from keras.src.backend.common.variables import standardize_dtype
 from keras.src.backend.openvino.core import DTYPES_MAX
 from keras.src.backend.openvino.core import DTYPES_MIN
@@ -705,7 +706,16 @@ def broadcast_to(x, shape):
 
 
 def cbrt(x):
-    raise NotImplementedError("`cbrt` is not supported with openvino backend")
+    x = get_ov_output(x)
+    x_type = x.get_element_type()
+    if x_type.is_integral() or x_type == Type.boolean:
+        x = ov_opset.convert(x, OPENVINO_DTYPES[config.floatx()]).output(0)
+    sign_x = ov_opset.sign(x)
+    abs_x = ov_opset.absolute(x)
+    one_third = ov_opset.constant(1.0 / 3.0, x.get_element_type())
+    root_abs = ov_opset.power(abs_x, one_third)
+    res = ov_opset.multiply(sign_x, root_abs)
+    return OpenVINOKerasTensor(res.output(0))
 
 
 def ceil(x):
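The graph takes a sign-preserving cube root, `sign(x) * |x|**(1/3)`, because raising a negative base to a fractional power is undefined (NaN) in real arithmetic. A NumPy check of the identity the hunk relies on:

    import numpy as np

    x = np.array([-8.0, -1.0, 0.0, 27.0])
    # Stays real for negative inputs, where x ** (1.0 / 3.0) would yield NaN.
    np.testing.assert_allclose(np.sign(x) * np.abs(x) ** (1.0 / 3.0), np.cbrt(x))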
@@ -893,9 +903,53 @@ def diag(x, k=0):
 
 
 def diagonal(x, offset=0, axis1=0, axis2=1):
-    raise NotImplementedError(
-        "`diagonal` is not supported with openvino backend"
-    )
+    x = get_ov_output(x)
+    shape = x.get_partial_shape()
+    rank = x.get_partial_shape().rank.get_length()
+    if rank is None:
+        raise ValueError("`diagonal` requires input tensor with static rank.")
+    if rank < 2:
+        raise ValueError(
+            f"diagonal requires input tensor with rank >= 2.Given rank: {rank}"
+        )
+    axis1 = canonicalize_axis(axis1, rank)
+    axis2 = canonicalize_axis(axis2, rank)
+    if axis1 == axis2:
+        raise ValueError("`axis1` and `axis2` cannot be the same.")
+
+    perm_order = [axis1, axis2] + [
+        i for i in range(rank) if i != axis1 and i != axis2
+    ]
+    perm_const = ov_opset.constant(perm_order, dtype=Type.i32).output(0)
+    x_transposed = ov_opset.transpose(x, perm_const)
+
+    N_dim = shape[axis1]
+    M_dim = shape[axis2]
+    if not N_dim.is_static or not M_dim.is_static:
+        raise ValueError(
+            "`diagonal` requires input tensor with static shape for axes "
+            f"`axis1` ({axis1}) and `axis2` ({axis2})."
+        )
+    N = N_dim.get_length()
+    M = M_dim.get_length()
+    if offset >= 0:
+        L = np.minimum(N, M - offset) if (M - offset) > 0 else 0
+        indices = [[i, i + offset] for i in range(L)]
+    else:
+        L = np.minimum(N + offset, M) if (N + offset) > 0 else 0
+        indices = [[i - offset, i] for i in range(L)]
+
+    indices = np.array(indices, dtype=np.int32).reshape(L, 2)
+    indices_const = ov_opset.constant(indices, dtype=Type.i32).output(0)
+
+    diag_gathered = ov_opset.gather_nd(x_transposed, indices_const)
+
+    out_rank = rank - 1
+    out_perm_order = list(range(1, out_rank)) + [0]
+    out_perm_const = ov_opset.constant(out_perm_order, dtype=Type.i32).output(0)
+
+    final_output = ov_opset.transpose(diag_gathered, out_perm_const)
+    return OpenVINOKerasTensor(final_output.output(0))
 
 
 def diff(a, n=1, axis=-1):
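The implementation front-loads `axis1`/`axis2` with a transpose, gathers the `(i, i + offset)` index pairs via `gather_nd`, then rotates the diagonal axis to the back to match `np.diagonal`'s output layout. The same index construction in plain NumPy, as a sanity sketch:

    import numpy as np

    x = np.arange(2 * 3 * 4).reshape(2, 3, 4)
    offset, axis1, axis2 = 1, 1, 2

    moved = np.moveaxis(x, (axis1, axis2), (0, 1))  # front-load the plane
    n, m = moved.shape[:2]
    length = min(n, m - offset) if m - offset > 0 else 0
    pairs = [(i, i + offset) for i in range(length)]
    diag = np.stack([moved[i, j] for i, j in pairs], axis=-1)  # diag axis last
    np.testing.assert_array_equal(diag, np.diagonal(x, offset, axis1, axis2))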
@@ -1072,22 +1126,28 @@ def expm1(x):
 
 def flip(x, axis=None):
     x_node = get_ov_output(x)
-    ndim = x_node.get_partial_shape().rank.get_length()
+
+    # Using OpenVINO tensor shape
+    ndim = len(x_node.get_partial_shape())
     if ndim is None:
         raise ValueError(
-            "The `flip` operation does not support tensors with dynamic rank"
+            "The `flip` operation does not support tensors with dynamic rank "
             "for the OpenVINO backend."
         )
+
     if axis is None:
         axis = list(range(ndim))
     elif isinstance(axis, int):
         axis = [axis]
+
     axis = [a + ndim if a < 0 else a for a in axis]
+
     begin = [0] * ndim
     end = [0] * ndim
     strides = [1] * ndim
     for a in axis:
         strides[a] = -1
+
     all_ones_mask = [1] * ndim
     result = ov_opset.strided_slice(
         data=x_node,
@@ -1100,6 +1160,61 @@ def flip(x, axis=None):
     return OpenVINOKerasTensor(result.output(0))
 
 
+def rot90(array, k=1, axes=(0, 1)):
+    """Rotate an array by 90 degrees in the plane specified by axes."""
+    array = get_ov_output(array)
+
+    if not isinstance(axes, (tuple, list)) or len(axes) != 2:
+        raise ValueError("axes must be a tuple of length 2")
+
+    shape = array.get_partial_shape()
+    ndim = shape.rank.get_length()
+    if ndim is None:
+        raise ValueError(
+            "`rot90` does not support tensors with dynamic rank "
+            "for the OpenVINO backend."
+        )
+
+    axis1 = canonicalize_axis(axes[0], ndim)
+    axis2 = canonicalize_axis(axes[1], ndim)
+
+    if axis1 == axis2:
+        raise ValueError("axes must be different")
+
+    k = k % 4
+    if k == 0:
+        return OpenVINOKerasTensor(array)
+
+    result = array
+
+    for _ in range(k):
+        # 1. Transpose axis1 <-> axis2
+        perm = list(range(ndim))
+        perm[axis1], perm[axis2] = perm[axis2], perm[axis1]
+        perm_const = ov_opset.constant(perm, Type.i32).output(0)
+        result = ov_opset.transpose(result, perm_const).output(0)
+
+        # 2. Reverse along axis1 using StridedSlice
+        begin = [0] * ndim
+        end = [0] * ndim
+        strides = [1] * ndim
+        strides[axis1] = -1
+
+        begin_mask = [1] * ndim
+        end_mask = [1] * ndim
+
+        result = ov_opset.strided_slice(
+            data=result,
+            begin=begin,
+            end=end,
+            strides=strides,
+            begin_mask=begin_mask,
+            end_mask=end_mask,
+        ).output(0)
+
+    return OpenVINOKerasTensor(result)
+
+
 def floor(x):
     x = get_ov_output(x)
     x_type = x.get_element_type()
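Each 90-degree step is a transpose of the rotation plane followed by a reversal along one of its axes (the `strided_slice` with stride -1). NumPy parity check of that decomposition:

    import numpy as np

    def rot90_step(a, axis1, axis2):
        # One 90-degree rotation: swap the plane axes, then reverse axis1.
        return np.flip(np.swapaxes(a, axis1, axis2), axis=axis1)

    a = np.arange(6).reshape(2, 3)
    out = a
    for k in range(4):
        np.testing.assert_array_equal(out, np.rot90(a, k=k, axes=(0, 1)))
        out = rot90_step(out, 0, 1)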
@@ -1176,7 +1291,34 @@ def hstack(xs):
 
 
 def hypot(x1, x2):
-    raise NotImplementedError("`hypot` is not supported with openvino backend")
+    element_type = None
+    if isinstance(x1, OpenVINOKerasTensor):
+        element_type = x1.output.get_element_type()
+    if isinstance(x2, OpenVINOKerasTensor):
+        element_type = x2.output.get_element_type()
+    x1 = get_ov_output(x1, element_type)
+    x2 = get_ov_output(x2, element_type)
+    x1, x2 = _align_operand_types(x1, x2, "hypot()")
+    x_type = x1.get_element_type()
+    if x_type.is_integral() or x_type == Type.boolean:
+        ov_type = OPENVINO_DTYPES[config.floatx()]
+        x1 = ov_opset.convert(x1, ov_type)
+        x2 = ov_opset.convert(x2, ov_type)
+    x1_abs = ov_opset.absolute(x1)
+    x2_abs = ov_opset.absolute(x2)
+    max_val = ov_opset.maximum(x1_abs, x2_abs)
+    min_val = ov_opset.minimum(x1_abs, x2_abs)
+    one = ov_opset.constant(1, max_val.get_element_type())
+    is_zero_mask = ov_opset.equal(
+        max_val, ov_opset.constant(0, max_val.get_element_type())
+    )
+    safe_divisor = ov_opset.select(is_zero_mask, one, max_val)
+    ratio = ov_opset.divide(min_val, safe_divisor)
+    result = ov_opset.multiply(
+        max_val,
+        ov_opset.sqrt(ov_opset.add(one, ov_opset.multiply(ratio, ratio))),
+    )
+    return OpenVINOKerasTensor(result.output(0))
 
 
 def identity(n, dtype=None):
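Instead of the naive `sqrt(x1**2 + x2**2)`, which overflows when either operand is near the dtype's maximum, the graph rescales by the larger magnitude: `hypot(a, b) = max * sqrt(1 + (min/max)**2)`, with the `select` guarding `max == 0` so `0/0` never occurs. A float32 demonstration of the difference:

    import numpy as np

    a, b = np.float32(2e38), np.float32(1.5e38)
    naive = np.sqrt(a * a + b * b)  # inf: a * a overflows float32
    big, small = max(a, b), min(a, b)
    ratio = small / big
    stable = big * np.sqrt(np.float32(1) + ratio * ratio)  # 2.5e38, finite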
@@ -1313,7 +1455,66 @@ def isreal(x):
 
 
 def kron(x1, x2):
-    raise NotImplementedError("`kron` is not supported with openvino backend")
+    x1 = get_ov_output(x1)
+    x2 = get_ov_output(x2)
+    x1, x2 = _align_operand_types(x1, x2, "kron()")
+    x1_shape = x1.get_partial_shape()
+    x2_shape = x2.get_partial_shape()
+    if x1_shape.rank.is_dynamic or x2_shape.rank.is_dynamic:
+        raise ValueError(
+            "`kron` does not support tensors with dynamic rank for "
+            "the OpenVINO backend."
+        )
+    ndim1 = x1_shape.rank.get_length()
+    ndim2 = x2_shape.rank.get_length()
+    if ndim1 < ndim2:
+        axes = ov_opset.range(
+            ov_opset.constant(0, Type.i32),
+            ov_opset.constant(ndim2 - ndim1, Type.i32),
+            ov_opset.constant(1, Type.i32),
+        )
+        x1 = ov_opset.unsqueeze(x1, axes)
+        ndim1 = ndim2
+    elif ndim2 < ndim1:
+        axes = ov_opset.range(
+            ov_opset.constant(0, Type.i32),
+            ov_opset.constant(ndim1 - ndim2, Type.i32),
+            ov_opset.constant(1, Type.i32),
+        )
+        x2 = ov_opset.unsqueeze(x2, axes)
+        ndim2 = ndim1
+    shape1 = ov_opset.shape_of(x1, Type.i32)
+    shape2 = ov_opset.shape_of(x2, Type.i32)
+    ones = ov_opset.broadcast(
+        ov_opset.constant(1, Type.i32), ov_opset.constant([ndim1], Type.i32)
+    )
+    axis = ov_opset.constant(1, Type.i32)
+    flatten = ov_opset.constant([-1], Type.i32)
+    unsqueezed_ones = ov_opset.unsqueeze(ones, axis)
+    x1_new_shape = ov_opset.reshape(
+        ov_opset.concat(
+            [ov_opset.unsqueeze(shape1, axis), unsqueezed_ones],
+            axis=1,
+        ),
+        flatten,
+        False,
+    )
+    x2_new_shape = ov_opset.reshape(
+        ov_opset.concat(
+            [unsqueezed_ones, ov_opset.unsqueeze(shape2, axis)],
+            axis=1,
+        ),
+        flatten,
+        False,
+    )
+    result = ov_opset.multiply(
+        ov_opset.reshape(x1, x1_new_shape, False),
+        ov_opset.reshape(x2, x2_new_shape, False),
+    )
+    result = ov_opset.reshape(
+        result, ov_opset.multiply(shape1, shape2), False
+    ).output(0)
+    return OpenVINOKerasTensor(result)
 
 
 def lcm(x1, x2):
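The concatenated shapes build two broadcast-compatible views: `x1` reshaped to `(d1, 1, d2, 1, ...)` and `x2` to `(1, e1, 1, e2, ...)`, so a single multiply produces every pairwise product and the final reshape collapses each axis pair to `d*e`. The same trick in NumPy:

    import numpy as np

    x1 = np.arange(4).reshape(2, 2)
    x2 = np.arange(9).reshape(3, 3)

    # Interleave x1's dims with 1s and x2's dims after 1s, as the concat of
    # shapes in the hunk does, then broadcast-multiply and collapse.
    s1 = [d for dim in x1.shape for d in (dim, 1)]  # (2, 1, 2, 1)
    s2 = [d for dim in x2.shape for d in (1, dim)]  # (1, 3, 1, 3)
    out = (x1.reshape(s1) * x2.reshape(s2)).reshape(
        [p * q for p, q in zip(x1.shape, x2.shape)]
    )
    np.testing.assert_array_equal(out, np.kron(x1, x2))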
@@ -1578,9 +1779,42 @@ def logaddexp(x1, x2):
 
 
 def logaddexp2(x1, x2):
-    raise NotImplementedError(
-        "`logaddexp2` is not supported with openvino backend"
+    element_type = None
+    if isinstance(x1, OpenVINOKerasTensor):
+        element_type = x1.output.get_element_type()
+    if isinstance(x2, OpenVINOKerasTensor):
+        element_type = x2.output.get_element_type()
+    x1 = get_ov_output(x1, element_type)
+    x2 = get_ov_output(x2, element_type)
+    x1, x2 = _align_operand_types(x1, x2, "logaddexp2()")
+
+    if x1.element_type.is_integral() or x2.element_type.is_integral():
+        float_dtype = OPENVINO_DTYPES[config.floatx()]
+        if x1.get_element_type().is_integral():
+            x1 = ov_opset.convert(x1, float_dtype)
+        if x2.get_element_type().is_integral():
+            x2 = ov_opset.convert(x2, float_dtype)
+
+    max_val = ov_opset.maximum(x1, x2)
+
+    sub = ov_opset.subtract(x1, x2)
+    abs_diff = ov_opset.abs(sub)
+
+    neg_abs_diff = ov_opset.negative(abs_diff)
+
+    element_type = neg_abs_diff.get_element_type()
+
+    two = ov_opset.constant(2, dtype=element_type)
+
+    power_of_2 = ov_opset.power(two, neg_abs_diff)
+
+    one_plus_power = ov_opset.add(
+        ov_opset.constant(1, dtype=element_type), power_of_2
     )
+    log2_term = ov_opset.divide(ov_opset.log(one_plus_power), ov_opset.log(two))
+    result = ov_opset.add(max_val, log2_term).output(0)
+
+    return OpenVINOKerasTensor(result)
 
 
 def logical_and(x1, x2):
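The graph uses the standard stabilization `logaddexp2(a, b) = max(a, b) + log2(1 + 2**-|a - b|)`, which never exponentiates a large positive value. A NumPy check:

    import numpy as np

    a, b = 1000.0, 998.0
    # 2.0 ** 1000 would overflow float64; the rewritten form stays finite.
    stable = max(a, b) + np.log2(1.0 + 2.0 ** (-abs(a - b)))
    np.testing.assert_allclose(stable, np.logaddexp2(a, b))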
@@ -1855,6 +2089,10 @@ def moveaxis(x, source, destination):
     return OpenVINOKerasTensor(ov_opset.transpose(x, axes_const).output(0))
 
 
+def nansum(x, axis=None, keepdims=False):
+    raise NotImplementedError("`nansum` is not supported with openvino backend")
+
+
 def nan_to_num(x, nan=0.0, posinf=None, neginf=None):
     x = get_ov_output(x)
     dtype = x.get_element_type()
@@ -2005,6 +2243,10 @@ def prod(x, axis=None, keepdims=False, dtype=None):
     return OpenVINOKerasTensor(result)
 
 
+def ptp(x, axis=None, keepdims=False):
+    raise NotImplementedError("`ptp` is not supported with openvino backend")
+
+
 def quantile(x, q, axis=None, method="linear", keepdims=False):
     raise NotImplementedError(
         "`quantile` is not supported with openvino backend"
@@ -2141,7 +2383,14 @@ def sinh(x):
 
 
 def size(x):
-    raise NotImplementedError("`size` is not supported with openvino backend")
+    x = get_ov_output(x)
+    shape_tensor = ov_opset.shape_of(x, output_type=Type.i64)
+    final_size = ov_opset.reduce_prod(
+        shape_tensor,
+        ov_opset.constant([0], Type.i64),
+        keep_dims=False,
+    )
+    return OpenVINOKerasTensor(final_size.output(0))
 
 
 def sort(x, axis=-1):
@@ -2283,9 +2532,20 @@ def std(x, axis=None, keepdims=False):
 
 
 def swapaxes(x, axis1, axis2):
-    raise NotImplementedError(
-        "`swapaxes` is not supported with openvino backend"
-    )
+    x = get_ov_output(x)
+    x_shape = x.get_partial_shape()
+    if x_shape.rank.is_dynamic:
+        raise ValueError(
+            "`swapaxes` does not support tensors with dynamic rank for the "
+            "OpenVINO backend."
+        )
+    rank = x_shape.rank.get_length()
+    axis1 = canonicalize_axis(axis1, rank)
+    axis2 = canonicalize_axis(axis2, rank)
+    axes = list(range(rank))
+    axes[axis1], axes[axis2] = axes[axis2], axes[axis1]
+    result = ov_opset.transpose(x, ov_opset.constant(axes, Type.i32))
+    return OpenVINOKerasTensor(result.output(0))
 
 
 def take(x, indices, axis=None):
@@ -2404,7 +2664,8 @@ def tile(x, repeats):
 
 
 def trace(x, offset=0, axis1=0, axis2=1):
-    raise NotImplementedError("`trace` is not supported with openvino backend")
+    x = diagonal(x, offset=offset, axis1=axis1, axis2=axis2)
+    return sum(x, axis=-1)
 
 
 def tri(N, M=None, k=0, dtype=None):
@@ -2937,6 +3198,66 @@ def slogdet(x):
 
 
 def argpartition(x, kth, axis=-1):
-    raise NotImplementedError(
-        "`argpartition` is not supported with openvino backend"
+    x = get_ov_output(x)
+    x_shape = x.get_partial_shape()
+    rank = x_shape.rank.get_length()
+    axis = canonicalize_axis(axis, rank)
+    axes = list(range(rank))
+    axes[axis], axes[-1] = axes[-1], axes[axis]
+    x = ov_opset.transpose(x, ov_opset.constant(axes))
+    x_shape_tensor = ov_opset.shape_of(x)
+    n = ov_opset.gather(
+        x_shape_tensor,
+        ov_opset.constant(-1),
+        ov_opset.constant(0),
+    )
+    if isinstance(kth, int) and kth < 0:
+        kth_tensor = ov_opset.add(
+            n,
+            ov_opset.constant(kth, n.get_element_type()),
+        )
+    else:
+        kth_tensor = ov_opset.constant(kth, n.get_element_type())
+    one = ov_opset.constant(1, kth_tensor.get_element_type())
+    k_val = ov_opset.add(kth_tensor, one)
+    bottom_ind = ov_opset.topk(
+        ov_opset.negative(x),
+        k=k_val,
+        axis=-1,
+        mode="max",
+        sort="value",
+    ).output(1)
+    one_hot_mask = ov_opset.one_hot(
+        bottom_ind,
+        n,
+        ov_opset.constant(1),
+        ov_opset.constant(0),
+        axis=-1,
     )
+    mask = ov_opset.reduce_sum(
+        one_hot_mask,
+        ov_opset.constant([-2]),
+        keep_dims=False,
+    )
+    ones = ov_opset.broadcast(
+        ov_opset.constant(1),
+        x_shape_tensor,
+    )
+    proxy = ov_opset.subtract(ones, mask)
+    remaining_k = ov_opset.subtract(n, k_val)
+    top_ind = ov_opset.topk(
+        proxy,
+        k=remaining_k,
+        axis=-1,
+        mode="max",
+        sort="value",
+    ).output(1)
+    result = ov_opset.concat([bottom_ind, top_ind], axis=-1)
+    inv_axes = [0] * rank
+    for i, a in enumerate(axes):
+        inv_axes[a] = i
+    result = ov_opset.transpose(
+        result,
+        ov_opset.constant(inv_axes),
+    ).output(0)
+    return OpenVINOKerasTensor(result)
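OpenVINO has no partition primitive, so `argpartition` is emulated with two top-k passes: top-k over `-x` yields the indices of the `kth + 1` smallest values, a one-hot reduction marks those slots, and a second top-k over the complementary 0/1 proxy returns the remaining indices; concatenating the two index sets satisfies the partition contract. The idea in plain NumPy:

    import numpy as np

    x = np.array([9.0, 1.0, 7.0, 3.0, 5.0])
    kth = 2

    # First pass (topk of -x in the hunk): indices of the kth+1 smallest.
    bottom = np.argsort(x)[: kth + 1]
    # Second pass: top-k over a proxy that zeroes the already-taken slots.
    remaining = np.setdiff1d(np.arange(x.size), bottom)
    result = np.concatenate([bottom, remaining])

    # argpartition contract: result[kth] indexes the kth-smallest element.
    assert x[result[kth]] == np.sort(x)[kth]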