keras-rs-nightly 0.0.1.dev2025042803__py3-none-any.whl → 0.0.1.dev2025043003__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of keras-rs-nightly might be problematic.
- keras_rs/src/layers/feature_interaction/dot_interaction.py +47 -0
- keras_rs/src/layers/feature_interaction/feature_cross.py +25 -6
- keras_rs/src/layers/retrieval/hard_negative_mining.py +25 -7
- keras_rs/src/layers/retrieval/remove_accidental_hits.py +18 -5
- keras_rs/src/layers/retrieval/sampling_probability_correction.py +18 -3
- keras_rs/src/losses/pairwise_hinge_loss.py +66 -3
- keras_rs/src/losses/pairwise_logistic_loss.py +67 -4
- keras_rs/src/losses/pairwise_loss.py +14 -7
- keras_rs/src/losses/pairwise_mean_squared_error.py +66 -3
- keras_rs/src/losses/pairwise_soft_zero_one_loss.py +70 -4
- keras_rs/src/metrics/dcg.py +33 -12
- keras_rs/src/metrics/mean_average_precision.py +36 -18
- keras_rs/src/metrics/mean_reciprocal_rank.py +25 -2
- keras_rs/src/metrics/ndcg.py +42 -29
- keras_rs/src/metrics/precision_at_k.py +25 -2
- keras_rs/src/metrics/ranking_metric.py +4 -2
- keras_rs/src/metrics/recall_at_k.py +25 -2
- keras_rs/src/utils/doc_string_utils.py +6 -1
- keras_rs/src/version.py +1 -1
- {keras_rs_nightly-0.0.1.dev2025042803.dist-info → keras_rs_nightly-0.0.1.dev2025043003.dist-info}/METADATA +75 -1
- keras_rs_nightly-0.0.1.dev2025043003.dist-info/RECORD +42 -0
- {keras_rs_nightly-0.0.1.dev2025042803.dist-info → keras_rs_nightly-0.0.1.dev2025043003.dist-info}/WHEEL +1 -1
- keras_rs_nightly-0.0.1.dev2025042803.dist-info/RECORD +0 -42
- {keras_rs_nightly-0.0.1.dev2025042803.dist-info → keras_rs_nightly-0.0.1.dev2025043003.dist-info}/top_level.txt +0 -0
@@ -30,6 +30,53 @@ class DotInteraction(keras.layers.Layer):
 but is much slower.
 **kwargs: Args to pass to the base class.
 
+Example:
+
+```python
+# 1. Simple forward pass
+batch_size = 2
+embedding_dim = 32
+feature1 = np.random.randn(batch_size, embedding_dim)
+feature2 = np.random.randn(batch_size, embedding_dim)
+feature3 = np.random.randn(batch_size, embedding_dim)
+feature_interactions = keras_rs.layers.DotInteraction()(
+    [feature1, feature2, feature3]
+)
+
+# 2. After embedding layer in a model
+vocabulary_size = 32
+embedding_dim = 6
+
+# Create a simple model containing the layer.
+feature_input_1 = keras.Input(shape=(), name='indices_1', dtype="int32")
+feature_input_2 = keras.Input(shape=(), name='indices_2', dtype="int32")
+feature_input_3 = keras.Input(shape=(), name='indices_3', dtype="int32")
+x1 = keras.layers.Embedding(
+    input_dim=vocabulary_size,
+    output_dim=embedding_dim
+)(feature_input_1)
+x2 = keras.layers.Embedding(
+    input_dim=vocabulary_size,
+    output_dim=embedding_dim
+)(feature_input_2)
+x3 = keras.layers.Embedding(
+    input_dim=vocabulary_size,
+    output_dim=embedding_dim
+)(feature_input_3)
+feature_interactions = keras_rs.layers.DotInteraction()([x1, x2, x3])
+output = keras.layers.Dense(units=10)(x2)
+model = keras.Model(
+    [feature_input_1, feature_input_2, feature_input_3], output
+)
+
+# Call the model on the inputs.
+batch_size = 2
+f1 = np.random.randint(0, vocabulary_size, size=(batch_size,))
+f2 = np.random.randint(0, vocabulary_size, size=(batch_size,))
+f3 = np.random.randint(0, vocabulary_size, size=(batch_size,))
+outputs = model([f1, f2, f3])
+```
+
 References:
 - [M. Naumov et al.](https://arxiv.org/abs/1906.00091)
 """
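Note: in the added `DotInteraction` docstring example, the `Dense` head is wired to `x2` rather than to `feature_interactions`, so the interaction output goes unused. A minimal corrected sketch of the same model (an illustration assuming `keras`, `keras_rs`, and `numpy` are installed, not the package's own text):

```python
import numpy as np
import keras
import keras_rs

vocabulary_size = 32
embedding_dim = 6

# Three integer-index inputs, each with its own embedding table.
inputs = [
    keras.Input(shape=(), name=f"indices_{i}", dtype="int32") for i in (1, 2, 3)
]
embedded = [
    keras.layers.Embedding(input_dim=vocabulary_size, output_dim=embedding_dim)(x)
    for x in inputs
]

# Pairwise dot-product interactions between the embeddings.
feature_interactions = keras_rs.layers.DotInteraction()(embedded)

# Feed the interactions (not x2) into the prediction head.
output = keras.layers.Dense(units=10)(feature_interactions)
model = keras.Model(inputs, output)

batch_size = 2
features = [
    np.random.randint(0, vocabulary_size, size=(batch_size,)) for _ in range(3)
]
outputs = model(features)  # shape: (batch_size, 10)
```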
@@ -57,13 +57,32 @@ class FeatureCross(keras.layers.Layer):
 Example:
 
 ```python
-#
-
-
-
-
+# 1. Simple forward pass
+batch_size = 2
+embedding_dim = 32
+feature1 = np.random.randn(batch_size, embedding_dim)
+feature2 = np.random.randn(batch_size, embedding_dim)
+crossed_features = keras_rs.layers.FeatureCross()(feature1, feature2)
+
+# 2. After embedding layer in a model
+vocabulary_size = 32
+embedding_dim = 6
+
+# Create a simple model containing the layer.
+inputs = keras.Input(shape=(), name='indices', dtype="int32")
+x0 = keras.layers.Embedding(
+    input_dim=vocabulary_size,
+    output_dim=embedding_dim
+)(inputs)
+x1 = keras_rs.layers.FeatureCross()(x0, x0)
+x2 = keras_rs.layers.FeatureCross()(x0, x1)
 logits = keras.layers.Dense(units=10)(x2)
-model = keras.Model(
+model = keras.Model(inputs, logits)
+
+# Call the model on the inputs.
+batch_size = 2
+input_data = np.random.randint(0, vocabulary_size, size=(batch_size,))
+outputs = model(input_data)
 ```
 
 References:
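Note: for intuition about what the added example computes, the cross layer of Deep & Cross Networks is roughly `x0 * (W @ x + b) + x`; a numpy sketch of that update (the published DCN formulation — the exact keras-rs internals may differ in details such as the projection shape):

```python
import numpy as np

def cross_layer(x0, x, W, b):
    # DCN cross update: gate x0 element-wise by a dense projection of x,
    # then add a residual connection back to x.
    return x0 * (x @ W + b) + x

batch_size, dim = 2, 4
rng = np.random.default_rng(0)
x0 = rng.normal(size=(batch_size, dim))
W = rng.normal(size=(dim, dim))
b = np.zeros(dim)

x1 = cross_layer(x0, x0, W, b)  # mirrors FeatureCross()(x0, x0)
x2 = cross_layer(x0, x1, W, b)  # mirrors FeatureCross()(x0, x1)
print(x2.shape)  # (2, 4) -- the cross layer preserves the input shape
```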
@@ -12,11 +12,27 @@ MAX_FLOAT = ml_dtypes.finfo("float32").max / 100.0
 
 @keras_rs_export("keras_rs.layers.HardNegativeMining")
 class HardNegativeMining(keras.layers.Layer):
-"""
+"""Filter logits and labels to return hard negatives.
+
+The output will include logits and labels for the requested number of hard
+negatives as well as the positive candidate.
 
 Args:
 num_hard_negatives: How many hard negatives to return.
 **kwargs: Args to pass to the base class.
+
+Example:
+
+```python
+# Create layer with the configured number of hard negatives to mine.
+hard_negative_mining = keras_rs.layers.HardNegativeMining(
+    num_hard_negatives=10
+)
+
+# This will retrieve the top 10 negative candidates plus the positive
+# candidate from `labels` for each row.
+out_logits, out_labels = hard_negative_mining(in_logits, in_labels)
+```
 """
 
 def __init__(self, num_hard_negatives: int, **kwargs: Any) -> None:
@@ -33,15 +49,17 @@ class HardNegativeMining(keras.layers.Layer):
 negatives as well as the positive candidate.
 
 Args:
-logits: logits tensor, typically `[batch_size, num_candidates]`
-can have more dimensions or be 1D as `[num_candidates]`.
-labels: one-hot labels tensor, must be the same shape as
+logits: The logits tensor, typically `[batch_size, num_candidates]`
+    but can have more dimensions or be 1D as `[num_candidates]`.
+labels: The one-hot labels tensor, must be the same shape as
+    `logits`.
 
 Returns:
-tuple containing two tensors with the last dimension of
+A tuple containing two tensors with the last dimension of
 `num_candidates` replaced with `num_hard_negatives + 1`.
-
-
+
+* logits: `[..., num_hard_negatives + 1]` tensor of logits.
+* labels: `[..., num_hard_negatives + 1]` one-hot tensor of labels.
 """
 
 # Number of sampled logits, i.e, the number of hard negatives to be
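Note: a shape-level sketch of the call contract documented above, with random inputs (assumes `keras_rs` is installed; values are illustrative only):

```python
import numpy as np
import keras_rs

batch_size, num_candidates = 4, 100

in_logits = np.random.randn(batch_size, num_candidates).astype("float32")
# One positive per row, encoded one-hot.
in_labels = np.eye(num_candidates, dtype="float32")[
    np.random.randint(0, num_candidates, size=(batch_size,))
]

layer = keras_rs.layers.HardNegativeMining(num_hard_negatives=10)
out_logits, out_labels = layer(in_logits, in_labels)

# Last dimension shrinks to num_hard_negatives + 1 = 11.
print(out_logits.shape, out_labels.shape)  # (4, 11) (4, 11)
```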
@@ -15,6 +15,18 @@ class RemoveAccidentalHits(keras.layers.Layer):
 
 Zeroes the logits of negative candidates that have the same ID as the
 positive candidate in that row.
+
+Example:
+
+```python
+# Create layer with the configured number of hard negatives to mine.
+remove_accidental_hits = keras_rs.layers.RemoveAccidentalHits()
+
+# This will zero the logits of negative candidates that have the same ID as
+# the positive candidate from `labels` so as to not negatively impact the
+# true positive.
+logits = remove_accidental_hits(logits, labels, candidate_ids)
+```
 """
 
 def call(
@@ -29,16 +41,17 @@ class RemoveAccidentalHits(keras.layers.Layer):
 have the same ID as the positive candidate in that row.
 
 Args:
-logits: logits tensor, typically `[batch_size, num_candidates]`
-can have more dimensions or be 1D as `[num_candidates]`.
-labels: one-hot labels tensor, must be the same shape as
-
+logits: The logits tensor, typically `[batch_size, num_candidates]`
+    but can have more dimensions or be 1D as `[num_candidates]`.
+labels: The one-hot labels tensor, must be the same shape as
+    `logits`.
+candidate_ids: The candidate identifiers tensor, can be
 `[num_candidates]` or `[batch_size, num_candidates]` or have
 more dimensions as long as they match the last dimensions of
 `labels`.
 
 Returns:
-
+The modified logits with the same shape as the input logits.
 """
 # A more principled way is to implement
 # `softmax_cross_entropy_with_logits` with a input mask. Here we
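Note: a small sketch of the three-argument call documented above (illustrative inputs; assumes `keras_rs` is installed):

```python
import numpy as np
import keras_rs

logits = np.random.randn(2, 5).astype("float32")
# Positives sit at columns 1 and 3.
labels = np.eye(5, dtype="float32")[[1, 3]]
# In row 0, column 2 shares ID 1 with the positive: an accidental hit.
candidate_ids = np.array([[7, 1, 1, 3, 9], [7, 1, 2, 3, 9]])

layer = keras_rs.layers.RemoveAccidentalHits()
# Per the docstring, the accidental hit in row 0 / column 2 is zeroed;
# the output shape matches the input logits.
out_logits = layer(logits, labels, candidate_ids)
```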
@@ -17,6 +17,18 @@ class SamplingProbabilityCorrection(keras.layers.Layer):
 epsilon: float. Small float added to sampling probability to avoid
 taking the log of zero. Defaults to 1e-6.
 **kwargs: Args to pass to the base class.
+
+Example:
+
+```python
+# Create the layer.
+sampling_probability_correction = (
+    keras_rs.layers.SamplingProbabilityCorrection()
+)
+
+# Correct the logits based on the provided candidate sampling probability.
+logits = sampling_probability_correction(logits, probabilities)
+```
 """
 
 def __init__(self, epsilon: float = 1e-6, **kwargs: Any) -> None:
@@ -32,11 +44,14 @@ class SamplingProbabilityCorrection(keras.layers.Layer):
 """Corrects input logits to account for candidate sampling probability.
 
 Args:
-logits: The logits to correct
-
+logits: The logits tensor to correct, typically
+    `[batch_size, num_candidates]` but can have more dimensions or
+    be 1D as `[num_candidates]`.
+candidate_sampling_probability: The sampling probability with the
+    same shape as `logits`.
 
 Returns:
-The corrected logits.
+The corrected logits with the same shape as the input logits.
 """
 return logits - ops.log(
 ops.clip(candidate_sampling_probability, self.epsilon, 1.0)
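Note: the correction shown above reduces to `logits - log(clip(p, epsilon, 1))`; a pure-numpy check of that identity with illustrative values:

```python
import numpy as np

epsilon = 1e-6
logits = np.array([1.0, 2.0, 0.5])
probs = np.array([0.1, 0.5, 1.0])

corrected = logits - np.log(np.clip(probs, epsilon, 1.0))
# Rarely sampled candidates get boosted: [3.302585, 2.693147, 0.5]
print(corrected)
```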
@@ -14,14 +14,77 @@ class PairwiseHingeLoss(PairwiseLoss):
 
 formula = "loss = sum_{i} sum_{j} I(y_i > y_j) * max(0, 1 - (s_i - s_j))"
 explanation = """
-
-
-
+- `max(0, 1 - (s_i - s_j))` is the hinge loss, which penalizes cases where
+  the score difference `s_i - s_j` is not sufficiently large when
+  `y_i > y_j`.
 """
 extra_args = ""
+example = """
+With `compile()` API:
+
+```python
+model.compile(
+    loss=keras_rs.losses.PairwiseHingeLoss(),
+    ...
+)
+```
+
+As a standalone function with unbatched inputs:
+
+>>> y_true = np.array([1.0, 0.0, 1.0, 3.0, 2.0])
+>>> y_pred = np.array([1.0, 3.0, 2.0, 4.0, 0.8])
+>>> pairwise_hinge_loss = keras_rs.losses.PairwiseHingeLoss()
+>>> pairwise_hinge_loss(y_true=y_true, y_pred=y_pred)
+2.32000
+
+With batched inputs using default 'auto'/'sum_over_batch_size' reduction:
+
+>>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
+>>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+>>> pairwise_hinge_loss = keras_rs.losses.PairwiseHingeLoss()
+>>> pairwise_hinge_loss(y_true=y_true, y_pred=y_pred)
+0.75
+
+With masked inputs (useful for ragged inputs):
+
+>>> y_true = {
+...     "labels": np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]]),
+...     "mask": np.array(
+...         [[True, True, True, True], [True, True, False, False]]
+...     ),
+... }
+>>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+>>> pairwise_hinge_loss(y_true=y_true, y_pred=y_pred)
+0.64999
+
+With `sample_weight`:
+
+>>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
+>>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+>>> sample_weight = np.array(
+...     [[2.0, 3.0, 1.0, 1.0], [2.0, 1.0, 0.0, 0.0]]
+... )
+>>> pairwise_hinge_loss = keras_rs.losses.PairwiseHingeLoss()
+>>> pairwise_hinge_loss(
+...     y_true=y_true, y_pred=y_pred, sample_weight=sample_weight
+... )
+1.02499
+
+Using `'none'` reduction:
+
+>>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
+>>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+>>> pairwise_hinge_loss = keras_rs.losses.PairwiseHingeLoss(
+...     reduction="none"
+... )
+>>> pairwise_hinge_loss(y_true=y_true, y_pred=y_pred)
+[[3. , 0. , 2. , 0.], [0., 0.20000005, 0.79999995, 0.]]
+"""
+
 PairwiseHingeLoss.__doc__ = pairwise_loss_subclass_doc_string.format(
 loss_name="hinge loss",
 formula=formula,
 explanation=explanation,
 extra_args=extra_args,
+example=example,
 )
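Note: the unbatched value `2.32000` in the added docstring follows directly from the formula; a numpy sketch reproducing it (with the `sum_over_batch_size` reduction dividing the pairwise total by the list size):

```python
import numpy as np

y_true = np.array([1.0, 0.0, 1.0, 3.0, 2.0])
y_pred = np.array([1.0, 3.0, 2.0, 4.0, 0.8])

# I(y_i > y_j) * max(0, 1 - (s_i - s_j)) over all ordered pairs (i, j).
score_diff = y_pred[:, None] - y_pred[None, :]
indicator = (y_true[:, None] > y_true[None, :]).astype(float)
pairwise = indicator * np.maximum(0.0, 1.0 - score_diff)

print(pairwise.sum() / len(y_true))  # 2.32
```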
@@ -22,15 +22,78 @@ class PairwiseLogisticLoss(PairwiseLoss):
 
 formula = "loss = sum_{i} sum_{j} I(y_i > y_j) * log(1 + exp(-(s_i - s_j)))"
 explanation = """
-
-
-
-
+- `log(1 + exp(-(s_i - s_j)))` is the logistic loss, which penalizes
+  cases where the score difference `s_i - s_j` is not sufficiently large
+  when `y_i > y_j`. This function provides a smooth approximation of the
+  ideal step function, making it suitable for gradient-based optimization.
 """
 extra_args = ""
+example = """
+With `compile()` API:
+
+```python
+model.compile(
+    loss=keras_rs.losses.PairwiseLogisticLoss(),
+    ...
+)
+```
+
+As a standalone function with unbatched inputs:
+
+>>> y_true = np.array([1.0, 0.0, 1.0, 3.0, 2.0])
+>>> y_pred = np.array([1.0, 3.0, 2.0, 4.0, 0.8])
+>>> pairwise_logistic_loss = keras_rs.losses.PairwiseLogisticLoss()
+>>> pairwise_logistic_loss(y_true=y_true, y_pred=y_pred)
+>>> 1.70708
+
+With batched inputs using default 'auto'/'sum_over_batch_size' reduction:
+
+>>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
+>>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+>>> pairwise_logistic_loss = keras_rs.losses.PairwiseLogisticLoss()
+>>> pairwise_logistic_loss(y_true=y_true, y_pred=y_pred)
+0.73936
+
+With masked inputs (useful for ragged inputs):
+
+>>> y_true = {
+...     "labels": np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]]),
+...     "mask": np.array(
+...         [[True, True, True, True], [True, True, False, False]]
+...     ),
+... }
+>>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+>>> pairwise_logistic_loss(y_true=y_true, y_pred=y_pred)
+0.53751
+
+With `sample_weight`:
+
+>>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
+>>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+>>> sample_weight = np.array(
+...     [[2.0, 3.0, 1.0, 1.0], [2.0, 1.0, 0.0, 0.0]]
+... )
+>>> pairwise_logistic_loss = keras_rs.losses.PairwiseLogisticLoss()
+>>> pairwise_logistic_loss(
+...     y_true=y_true, y_pred=y_pred, sample_weight=sample_weight
+... )
+>>> 0.80337
+
+Using `'none'` reduction:
+
+>>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
+>>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+>>> pairwise_logistic_loss = keras_rs.losses.PairwiseLogisticLoss(
+...     reduction="none"
+... )
+>>> pairwise_logistic_loss(y_true=y_true, y_pred=y_pred)
+[[2.126928, 0., 1.3132616, 0.48877698], [0., 0.20000005, 0.79999995, 0.]]
+"""
+
 PairwiseLogisticLoss.__doc__ = pairwise_loss_subclass_doc_string.format(
 loss_name="logistic loss",
 formula=formula,
 explanation=explanation,
 extra_args=extra_args,
+example=example,
 )
@@ -70,7 +70,8 @@ class PairwiseLoss(keras.losses.Loss, abc.ABC):
 y_true: types.Tensor,
 y_pred: types.Tensor,
 ) -> types.Tensor:
-"""
+"""Compute the pairwise loss.
+
 Args:
 y_true: tensor or dict. Ground truth values. If tensor, of shape
 `(list_size)` for unbatched inputs or `(batch_size, list_size)`
@@ -83,6 +84,9 @@ class PairwiseLoss(keras.losses.Loss, abc.ABC):
 y_pred: tensor. The predicted values, of shape `(list_size)` for
 unbatched inputs or `(batch_size, list_size)` for batched
 inputs. Should be of the same shape as `y_true`.
+
+Returns:
+    The loss.
 """
 mask = None
 if isinstance(y_true, dict):
@@ -134,11 +138,12 @@ pairwise_loss_subclass_doc_string = (
 ```
 
 where:
-
-
-
-
-
+
+- `y_i` and `y_j` are the true labels of items `i` and `j`, respectively.
+- `s_i` and `s_j` are the predicted scores of items `i` and `j`,
+  respectively.
+- `I(y_i > y_j)` is an indicator function that equals 1 if `y_i > y_j`,
+  and 0 otherwise.{explanation}
 Args:{extra_args}
 reduction: Type of reduction to apply to the loss. In almost all cases
 this should be `"sum_over_batch_size"`. Supported options are
@@ -154,5 +159,7 @@ pairwise_loss_subclass_doc_string = (
 `"float32"` unless set to different value
 (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
 provided, then the `compute_dtype` will be utilized.
-
+
+Examples:
+{example}"""
 )
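Note: the `Examples:` slot added here is filled per subclass via `str.format`, the same mechanism already used for `formula` and `explanation`. A simplified sketch of that docstring-assembly pattern (hypothetical names, not the library's actual template):

```python
# Simplified stand-in for pairwise_loss_subclass_doc_string.
template = """Computes the pairwise {loss_name}.

Formula:
    {formula}

Args:{extra_args}
    reduction: Type of reduction to apply to the loss.

Examples:
{example}"""

class PairwiseDemoLoss:
    pass

# Braces inside argument values are left untouched by str.format.
PairwiseDemoLoss.__doc__ = template.format(
    loss_name="demo loss",
    formula="loss = sum over i, j of I(y_i > y_j) * f(s_i - s_j)",
    extra_args="",
    example="    >>> # usage snippet goes here",
)
print(PairwiseDemoLoss.__doc__)
```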
@@ -59,14 +59,77 @@ class PairwiseMeanSquaredError(PairwiseLoss):
 
 formula = "loss = sum_{i} sum_{j} I(y_i > y_j) * (s_i - s_j)^2"
 explanation = """
-
-
-
+- `(s_i - s_j)^2` is the squared difference between the predicted scores
+  of items `i` and `j`, which penalizes discrepancies between the predicted
+  order of items relative to their true order.
 """
 extra_args = ""
+example = """
+With `compile()` API:
+
+```python
+model.compile(
+    loss=keras_rs.losses.PairwiseMeanSquaredError(),
+    ...
+)
+```
+
+As a standalone function with unbatched inputs:
+
+>>> y_true = np.array([1.0, 0.0, 1.0, 3.0, 2.0])
+>>> y_pred = np.array([1.0, 3.0, 2.0, 4.0, 0.8])
+>>> pairwise_mse = keras_rs.losses.PairwiseMeanSquaredError()
+>>> pairwise_mse(y_true=y_true, y_pred=y_pred)
+>>> 19.10400
+
+With batched inputs using default 'auto'/'sum_over_batch_size' reduction:
+
+>>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
+>>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+>>> pairwise_mse = keras_rs.losses.PairwiseMeanSquaredError()
+>>> pairwise_mse(y_true=y_true, y_pred=y_pred)
+5.57999
+
+With masked inputs (useful for ragged inputs):
+
+>>> y_true = {
+...     "labels": np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]]),
+...     "mask": np.array(
+...         [[True, True, True, True], [True, True, False, False]]
+...     ),
+... }
+>>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+>>> pairwise_mse(y_true=y_true, y_pred=y_pred)
+4.76000
+
+With `sample_weight`:
+
+>>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
+>>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+>>> sample_weight = np.array(
+...     [[2.0, 3.0, 1.0, 1.0], [2.0, 1.0, 0.0, 0.0]]
+... )
+>>> pairwise_mse = keras_rs.losses.PairwiseMeanSquaredError()
+>>> pairwise_mse(
+...     y_true=y_true, y_pred=y_pred, sample_weight=sample_weight
+... )
+11.0500
+
+Using `'none'` reduction:
+
+>>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
+>>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+>>> pairwise_mse = keras_rs.losses.PairwiseMeanSquaredError(
+...     reduction="none"
+... )
+>>> pairwise_mse(y_true=y_true, y_pred=y_pred)
+[[11., 17., 5., 5.], [2.04, 1.3199998, 1.6399999, 1.6399999]]
+"""
+
 PairwiseMeanSquaredError.__doc__ = pairwise_loss_subclass_doc_string.format(
 loss_name="mean squared error",
 formula=formula,
 explanation=explanation,
 extra_args=extra_args,
+example=example,
 )
@@ -18,15 +18,81 @@ class PairwiseSoftZeroOneLoss(PairwiseLoss):
 
 formula = "loss = sum_{i} sum_{j} I(y_i > y_j) * (1 - sigmoid(s_i - s_j))"
 explanation = """
-
-
-
-
+- `(1 - sigmoid(s_i - s_j))` represents the soft zero-one loss, which
+  approximates the ideal zero-one loss (which would be 1 if `s_i < s_j`
+  and 0 otherwise) with a smooth, differentiable function. This makes it
+  suitable for gradient-based optimization.
 """
 extra_args = ""
+example = """
+With `compile()` API:
+
+```python
+model.compile(
+    loss=keras_rs.losses.PairwiseSoftZeroOneLoss(),
+    ...
+)
+```
+
+As a standalone function with unbatched inputs:
+
+>>> y_true = np.array([1.0, 0.0, 1.0, 3.0, 2.0])
+>>> y_pred = np.array([1.0, 3.0, 2.0, 4.0, 0.8])
+>>> pairwise_soft_zero_one_loss = keras_rs.losses.PairwiseSoftZeroOneLoss()
+>>> pairwise_soft_zero_one_loss(y_true=y_true, y_pred=y_pred)
+0.86103
+
+With batched inputs using default 'auto'/'sum_over_batch_size' reduction:
+
+>>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
+>>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+>>> pairwise_soft_zero_one_loss = keras_rs.losses.PairwiseSoftZeroOneLoss()
+>>> pairwise_soft_zero_one_loss(y_true=y_true, y_pred=y_pred)
+0.46202
+
+With masked inputs (useful for ragged inputs):
+
+>>> y_true = {
+...     "labels": np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]]),
+...     "mask": np.array(
+...         [[True, True, True, True], [True, True, False, False]]
+...     ),
+... }
+>>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+>>> pairwise_soft_zero_one_loss(y_true=y_true, y_pred=y_pred)
+0.29468
+
+With `sample_weight`:
+
+>>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
+>>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+>>> sample_weight = np.array(
+...     [[2.0, 3.0, 1.0, 1.0], [2.0, 1.0, 0.0, 0.0]]
+... )
+>>> pairwise_soft_zero_one_loss = keras_rs.losses.PairwiseSoftZeroOneLoss()
+>>> pairwise_soft_zero_one_loss(
+...     y_true=y_true, y_pred=y_pred, sample_weight=sample_weight
+... )
+0.40478
+
+Using `'none'` reduction:
+
+>>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
+>>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+>>> pairwise_soft_zero_one_loss = keras_rs.losses.PairwiseSoftZeroOneLoss(
+...     reduction="none"
+... )
+>>> pairwise_soft_zero_one_loss(y_true=y_true, y_pred=y_pred)
+[
+    [0.8807971 , 0., 0.73105854, 0.43557024],
+    [0., 0.31002545, 0.7191075 , 0.61961967]
+]
+"""
+
 PairwiseSoftZeroOneLoss.__doc__ = pairwise_loss_subclass_doc_string.format(
 loss_name="soft zero-one loss",
 formula=formula,
 explanation=explanation,
 extra_args=extra_args,
+example=example,
 )
keras_rs/src/metrics/dcg.py CHANGED
@@ -11,7 +11,7 @@ from keras_rs.src.metrics.ranking_metric import (
 ranking_metric_subclass_doc_string,
 )
 from keras_rs.src.metrics.ranking_metric import (
-
+    ranking_metric_subclass_doc_string_post_desc,
 )
 from keras_rs.src.metrics.ranking_metrics_utils import compute_dcg
 from keras_rs.src.metrics.ranking_metrics_utils import default_gain_fn
@@ -111,22 +111,41 @@ DCG@k(y', w') = sum_{i=1}^{k} (gain_fn(y'_i) / rank_discount_fn(i))
 ```
 
 where:
-
-
-
-
-
-
-
-
-
-"""
+- `y'_i` is the true relevance score of the item ranked at position `i`
+  (obtained by sorting `y_true` according to `y_pred`).
+- `gain_fn` is the user-provided function mapping relevance `y'_i` to a
+  gain value. The default function (`default_gain_fn`) is typically
+  equivalent to `lambda y: 2**y - 1`.
+- `rank_discount_fn` is the user-provided function mapping rank `i`
+  to a discount value. The default function (`default_rank_discount_fn`)
+  is typically equivalent to `lambda rank: 1 / log2(rank + 1)`.
+- The final result aggregates these per-list scores."""
 extra_args = """
 gain_fn: callable. Maps relevance scores (`y_true`) to gain values. The
 default implements `2**y - 1`.
 rank_discount_fn: function. Maps rank positions to discount
 values. The default (`default_rank_discount_fn`) implements
 `1 / log2(rank + 1)`."""
+example = """
+>>> batch_size = 2
+>>> list_size = 5
+>>> labels = np.random.randint(0, 3, size=(batch_size, list_size))
+>>> scores = np.random.random(size=(batch_size, list_size))
+>>> metric = keras_rs.metrics.DCG()(
+...     y_true=labels, y_pred=scores
+... )
+
+Mask certain elements (can be used for uneven inputs):
+
+>>> batch_size = 2
+>>> list_size = 5
+>>> labels = np.random.randint(0, 3, size=(batch_size, list_size))
+>>> scores = np.random.random(size=(batch_size, list_size))
+>>> mask = np.random.randint(0, 2, size=(batch_size, list_size), dtype=bool)
+>>> metric = keras_rs.metrics.DCG()(
+...     y_true={"labels": labels, "mask": mask}, y_pred=scores
+... )
+"""
 
 DCG.__doc__ = format_docstring(
 ranking_metric_subclass_doc_string,
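Note: a numpy sketch of the per-list DCG@k computation with the default gain and discount described above (an illustration of the formula, not the library internals):

```python
import numpy as np

def dcg_at_k(y_true, y_pred, k):
    # Rank items by predicted score, descending; keep the top k.
    order = np.argsort(-y_pred)[:k]
    gains = 2.0 ** y_true[order] - 1.0                       # 2**y - 1
    discounts = 1.0 / np.log2(np.arange(2, len(order) + 2))  # 1/log2(rank+1)
    return float(np.sum(gains * discounts))

y_true = np.array([1.0, 0.0, 2.0])
y_pred = np.array([0.3, 0.9, 0.5])
print(dcg_at_k(y_true, y_pred, k=3))  # ~2.3928
```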
@@ -137,4 +156,6 @@ DCG.__doc__ = format_docstring(
 relevance_type=relevance_type,
 score_range_interpretation=score_range_interpretation,
 formula=formula,
-)
+) + ranking_metric_subclass_doc_string_post_desc.format(
+    extra_args=extra_args, example=example
+)
@@ -7,7 +7,7 @@ from keras_rs.src.metrics.ranking_metric import (
 ranking_metric_subclass_doc_string,
 )
 from keras_rs.src.metrics.ranking_metric import (
-
+    ranking_metric_subclass_doc_string_post_desc,
 )
 from keras_rs.src.metrics.ranking_metrics_utils import get_list_weights
 from keras_rs.src.metrics.ranking_metrics_utils import sort_by_scores
@@ -82,23 +82,39 @@ rel(j) = y_i if rank(s_i) = j
 ```
 
 where:
-
-
-
-
-
-
-
-
-
-
-
-
-score `s_i`.
-- `sum_i y_i` calculates the total number of relevant items in the
-original list `y`.
-"""
+- `j` represents the rank position (starting from 1).
+- `sum_j` indicates a summation over all ranks `j` from 1 up to the list
+  size (or `k`).
+- `P@j(y, s)` denotes the Precision at rank `j`, calculated as the
+  number of relevant items found within the top `j` positions divided by `j`.
+- `rel(j)` represents the relevance of the item specifically at rank
+  `j`. `rel(j)` is 1 if the item at rank `j` is relevant, and 0 otherwise.
+- `y_i` is the true relevance label of the original item `i` before ranking.
+- `rank(s_i)` is the rank position assigned to item `i` based on its score
+  `s_i`.
+- `sum_i y_i` calculates the total number of relevant items in the original
+  list `y`."""
 extra_args = ""
+example = """
+>>> batch_size = 2
+>>> list_size = 5
+>>> labels = np.random.randint(0, 2, size=(batch_size, list_size))
+>>> scores = np.random.random(size=(batch_size, list_size))
+>>> metric = keras_rs.metrics.MeanAveragePrecision()(
+...     y_true=labels, y_pred=scores
+... )
+
+Mask certain elements (can be used for uneven inputs):
+
+>>> batch_size = 2
+>>> list_size = 5
+>>> labels = np.random.randint(0, 2, size=(batch_size, list_size))
+>>> scores = np.random.random(size=(batch_size, list_size))
+>>> mask = np.random.randint(0, 2, size=(batch_size, list_size), dtype=bool)
+>>> metric = keras_rs.metrics.MeanAveragePrecision()(
+...     y_true={"labels": labels, "mask": mask}, y_pred=scores
+... )
+"""
 
 MeanAveragePrecision.__doc__ = format_docstring(
 ranking_metric_subclass_doc_string,
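Note: a numpy sketch of the per-list Average Precision computation spelled out above (illustrative only):

```python
import numpy as np

def average_precision(y_true, y_pred):
    # Rank items by predicted score, descending.
    order = np.argsort(-y_pred)
    rel = y_true[order]  # rel(j) for ranks j = 1..n
    precision_at_j = np.cumsum(rel) / np.arange(1, len(rel) + 1)
    # Sum P@j only at relevant ranks, normalized by the relevant-item count.
    return float(np.sum(precision_at_j * rel) / np.sum(y_true))

y_true = np.array([1.0, 0.0, 1.0, 0.0])
y_pred = np.array([0.9, 0.8, 0.4, 0.2])
print(average_precision(y_true, y_pred))  # (1/1 + 2/3) / 2 = 0.8333...
```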
@@ -109,4 +125,6 @@ MeanAveragePrecision.__doc__ = format_docstring(
 relevance_type=relevance_type,
 score_range_interpretation=score_range_interpretation,
 formula=formula,
-)
+) + ranking_metric_subclass_doc_string_post_desc.format(
+    extra_args=extra_args, example=example
+)
@@ -7,7 +7,7 @@ from keras_rs.src.metrics.ranking_metric import (
 ranking_metric_subclass_doc_string,
 )
 from keras_rs.src.metrics.ranking_metric import (
-
+    ranking_metric_subclass_doc_string_post_desc,
 )
 from keras_rs.src.metrics.ranking_metrics_utils import get_list_weights
 from keras_rs.src.metrics.ranking_metrics_utils import sort_by_scores
@@ -86,6 +86,27 @@ formula = """```
 MRR(y, s) = max_{i} y_{i} / rank(s_{i})
 ```"""
 extra_args = ""
+example = """
+>>> batch_size = 2
+>>> list_size = 5
+>>> labels = np.random.randint(0, 2, size=(batch_size, list_size))
+>>> scores = np.random.random(size=(batch_size, list_size))
+>>> metric = keras_rs.metrics.MeanReciprocalRank()(
+...     y_true=labels, y_pred=scores
+... )
+
+Mask certain elements (can be used for uneven inputs):
+
+>>> batch_size = 2
+>>> list_size = 5
+>>> labels = np.random.randint(0, 2, size=(batch_size, list_size))
+>>> scores = np.random.random(size=(batch_size, list_size))
+>>> mask = np.random.randint(0, 2, size=(batch_size, list_size), dtype=bool)
+>>> metric = keras_rs.metrics.MeanReciprocalRank()(
+...     y_true={"labels": labels, "mask": mask}, y_pred=scores
+... )
+"""
+
 MeanReciprocalRank.__doc__ = format_docstring(
 ranking_metric_subclass_doc_string,
 width=80,
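Note: the MRR formula above, `max_i y_i / rank(s_i)`, in a short numpy sketch (illustrative only):

```python
import numpy as np

def reciprocal_rank(y_true, y_pred):
    # rank(s_i) is the 1-based position of item i when sorted by score.
    ranks = np.empty(len(y_pred), dtype=int)
    ranks[np.argsort(-y_pred)] = np.arange(1, len(y_pred) + 1)
    return float(np.max(y_true / ranks))

y_true = np.array([0.0, 0.0, 1.0, 1.0])
y_pred = np.array([0.9, 0.1, 0.7, 0.3])
print(reciprocal_rank(y_true, y_pred))  # best relevant item is at rank 2 -> 0.5
```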
@@ -95,4 +116,6 @@ MeanReciprocalRank.__doc__ = format_docstring(
 relevance_type=relevance_type,
 score_range_interpretation=score_range_interpretation,
 formula=formula,
-)
+) + ranking_metric_subclass_doc_string_post_desc.format(
+    extra_args=extra_args, example=example
+)
keras_rs/src/metrics/ndcg.py CHANGED
@@ -11,7 +11,7 @@ from keras_rs.src.metrics.ranking_metric import (
 ranking_metric_subclass_doc_string,
 )
 from keras_rs.src.metrics.ranking_metric import (
-
+    ranking_metric_subclass_doc_string_post_desc,
 )
 from keras_rs.src.metrics.ranking_metrics_utils import compute_dcg
 from keras_rs.src.metrics.ranking_metrics_utils import default_gain_fn
@@ -109,7 +109,7 @@ class NDCG(RankingMetric):
 
 concept_sentence = (
 "It normalizes the Discounted Cumulative Gain (DCG) with the Ideal "
-"Discounted Cumulative Gain (IDCG) for each list
+"Discounted Cumulative Gain (IDCG) for each list"
 )
 relevance_type = (
 "graded relevance scores (non-negative numbers where higher values "
@@ -124,11 +124,6 @@ score_range_interpretation = (
 )
 
 formula = """
-The metric calculates a weighted average nDCG score per list.
-For a single list, nDCG is computed as the ratio of the Discounted
-Cumulative Gain (DCG) of the predicted ranking to the Ideal Discounted
-Cumulative Gain (IDCG) of the best possible ranking:
-
 ```
 nDCG@k = DCG@k / IDCG@k
 ```
@@ -147,28 +142,44 @@ IDCG@k(y'') = sum_{i=1}^{k} (gain_fn(y''_i) / rank_discount_fn(i))
 ```
 
 where:
-
-
-
-
-
-
-
-
-
-
-
-
-
-potentially involving normalization by list-specific weights, to
-produce a weighted average.
-"""
+- `y'_i`: True relevance of the item at rank `i` in the ranking induced by
+  `y_pred`.
+- `y''_i` True relevance of the item at rank `i` in the *ideal* ranking (sorted
+  by `y_true` descending).
+- `gain_fn` is the user-provided function mapping relevance to gain. The default
+  function (`default_gain_fn`) is typically equivalent to `lambda y: 2**y - 1`.
+- `rank_discount_fn` is the user-provided function mapping rank `i` (1-based) to
+  a discount value. The default function (`default_rank_discount_fn`) is
+  typically equivalent to `lambda rank: 1 / log2(rank + 1)`.
+- If IDCG@k is 0 (e.g., no relevant items), nDCG@k is defined as 0.
+- The final result often aggregates these per-list nDCG scores, potentially
+  involving normalization by list-specific weights, to produce a weighted
+  average."""
 extra_args = """
-gain_fn: callable. Maps relevance scores (`y_true`) to gain values. The
-
-rank_discount_fn:
-
-
+gain_fn: callable. Maps relevance scores (`y_true`) to gain values. The
+    default implements `2**y - 1`.
+rank_discount_fn: function. Maps rank positions to discount
+    values. The default (`default_rank_discount_fn`) implements
+    `1 / log2(rank + 1)`."""
+example = """
+>>> batch_size = 2
+>>> list_size = 5
+>>> labels = np.random.randint(0, 3, size=(batch_size, list_size))
+>>> scores = np.random.random(size=(batch_size, list_size))
+>>> metric = keras_rs.metrics.NDCG()(
+...     y_true=labels, y_pred=scores
+... )
+
+Mask certain elements (can be used for uneven inputs):
+
+>>> batch_size = 2
+>>> list_size = 5
+>>> labels = np.random.randint(0, 3, size=(batch_size, list_size))
+>>> scores = np.random.random(size=(batch_size, list_size))
+>>> mask = np.random.randint(0, 2, size=(batch_size, list_size), dtype=bool)
+>>> metric = keras_rs.metrics.NDCG()(
+...     y_true={"labels": labels, "mask": mask}, y_pred=scores
+... )
 """
 
 NDCG.__doc__ = format_docstring(
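Note: extending the DCG sketch given earlier, nDCG divides by the DCG of the ideal ordering (sorted by `y_true`), and is 0 when IDCG is 0 (illustrative only):

```python
import numpy as np

def dcg(y_sorted):
    gains = 2.0 ** y_sorted - 1.0
    discounts = 1.0 / np.log2(np.arange(2, len(y_sorted) + 2))
    return float(np.sum(gains * discounts))

def ndcg(y_true, y_pred):
    dcg_val = dcg(y_true[np.argsort(-y_pred)])   # ranking induced by y_pred
    idcg_val = dcg(np.sort(y_true)[::-1])        # ideal ranking
    return dcg_val / idcg_val if idcg_val > 0 else 0.0

y_true = np.array([1.0, 0.0, 2.0])
y_pred = np.array([0.3, 0.9, 0.5])
print(ndcg(y_true, y_pred))  # ~2.3928 / 3.6309 = 0.659
```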
@@ -181,4 +192,6 @@ NDCG.__doc__ = format_docstring(
 score_range_interpretation=score_range_interpretation,
 formula=formula,
 extra_args=extra_args,
-)
+) + ranking_metric_subclass_doc_string_post_desc.format(
+    extra_args=extra_args, example=example
+)
@@ -7,7 +7,7 @@ from keras_rs.src.metrics.ranking_metric import (
 ranking_metric_subclass_doc_string,
 )
 from keras_rs.src.metrics.ranking_metric import (
-
+    ranking_metric_subclass_doc_string_post_desc,
 )
 from keras_rs.src.metrics.ranking_metrics_utils import get_list_weights
 from keras_rs.src.metrics.ranking_metrics_utils import sort_by_scores
@@ -82,6 +82,27 @@ P@k(y, s) = 1/k sum_i I[rank(s_i) < k] y_i
 where `y_i` is the relevance label (0/1) of the item ranked at position
 `i`, and `I[condition]` is 1 if the condition is met, otherwise 0."""
 extra_args = ""
+example = """
+>>> batch_size = 2
+>>> list_size = 5
+>>> labels = np.random.randint(0, 2, size=(batch_size, list_size))
+>>> scores = np.random.random(size=(batch_size, list_size))
+>>> metric = keras_rs.metrics.PrecisionAtK()(
+...     y_true=labels, y_pred=scores
+... )
+
+Mask certain elements (can be used for uneven inputs):
+
+>>> batch_size = 2
+>>> list_size = 5
+>>> labels = np.random.randint(0, 2, size=(batch_size, list_size))
+>>> scores = np.random.random(size=(batch_size, list_size))
+>>> mask = np.random.randint(0, 2, size=(batch_size, list_size), dtype=bool)
+>>> metric = keras_rs.metrics.PrecisionAtK()(
+...     y_true={"labels": labels, "mask": mask}, y_pred=scores
+... )
+"""
+
 PrecisionAtK.__doc__ = format_docstring(
 ranking_metric_subclass_doc_string,
 width=80,
@@ -91,4 +112,6 @@ PrecisionAtK.__doc__ = format_docstring(
 relevance_type=relevance_type,
 score_range_interpretation=score_range_interpretation,
 formula=formula,
-)
+) + ranking_metric_subclass_doc_string_post_desc.format(
+    extra_args=extra_args, example=example
+)
@@ -221,7 +221,6 @@ by sorting in descending order. {score_range_interpretation}.
 For each list of predicted scores `s` in `y_pred` and the corresponding list
 of true labels `y` in `y_true`, the per-query {metric_abbreviation} score is
 calculated as follows:
-
 {formula}
 
 The final {metric_abbreviation} score reported is typically the weighted
@@ -235,7 +234,7 @@ to get 1D weights. For more details, refer to
 `keras_rs.src.metrics.ranking_metrics_utils.get_list_weights`.
 """
 
-
+ranking_metric_subclass_doc_string_post_desc = """
 
 Args:{extra_args}
 k: int. The number of top-ranked items to consider (the 'k' in 'top-k').
@@ -249,4 +248,7 @@ ranking_metric_subclass_doc_string_args = """
 `"float32"` unless set to different value
 (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
 provided, then the `compute_dtype` will be utilized.
+
+Example:
+{example}
 """
@@ -7,7 +7,7 @@ from keras_rs.src.metrics.ranking_metric import (
 ranking_metric_subclass_doc_string,
 )
 from keras_rs.src.metrics.ranking_metric import (
-
+    ranking_metric_subclass_doc_string_post_desc,
 )
 from keras_rs.src.metrics.ranking_metrics_utils import get_list_weights
 from keras_rs.src.metrics.ranking_metrics_utils import sort_by_scores
@@ -73,6 +73,27 @@ R@k(y, s) = sum_i I[rank(s_i) < k] y_i / sum_j y_j
 where `y_i` is the relevance label (0/1) of the item ranked at position
 `i`, `I[condition]` is 1 if the condition is met, otherwise 0."""
 extra_args = ""
+example = """
+>>> batch_size = 2
+>>> list_size = 5
+>>> labels = np.random.randint(0, 2, size=(batch_size, list_size))
+>>> scores = np.random.random(size=(batch_size, list_size))
+>>> metric = keras_rs.metrics.RecallAtK()(
+...     y_true=labels, y_pred=scores
+... )
+
+Mask certain elements (can be used for uneven inputs):
+
+>>> batch_size = 2
+>>> list_size = 5
+>>> labels = np.random.randint(0, 2, size=(batch_size, list_size))
+>>> scores = np.random.random(size=(batch_size, list_size))
+>>> mask = np.random.randint(0, 2, size=(batch_size, list_size), dtype=bool)
+>>> metric = keras_rs.metrics.RecallAtK()(
+...     y_true={"labels": labels, "mask": mask}, y_pred=scores
+... )
+"""
+
 RecallAtK.__doc__ = format_docstring(
 ranking_metric_subclass_doc_string,
 width=80,
@@ -82,4 +103,6 @@ RecallAtK.__doc__ = format_docstring(
 relevance_type=relevance_type,
 score_range_interpretation=score_range_interpretation,
 formula=formula,
-)
+) + ranking_metric_subclass_doc_string_post_desc.format(
+    extra_args=extra_args, example=example
+)
@@ -30,8 +30,13 @@ def format_docstring(template: str, width: int = 80, **kwargs: Any) -> str:
 textwrap.indent(formula_dedented, base_indent_str)
 )
 elif "where:" in stripped_block:
+    # Expect this to be already indented.
+    splitted_block = stripped_block.split("\n")
 processed_output.append(
-textwrap.indent(
+    textwrap.indent(
+        splitted_block[0] + "\n\n" + "\n".join(splitted_block[1:]),
+        base_indent_str,
+    )
 )
 else:
 processed_output.append(
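Note: the `format_docstring` change above inserts a blank line after the `where:` heading of a block that is expected to be indented already; a minimal standalone sketch of that transformation (hypothetical helper, mirroring the new branch):

```python
import textwrap

def split_where_block(stripped_block: str, base_indent_str: str = "    ") -> str:
    # Keep the "where:" line, add a blank line, then re-join the
    # remaining (already indented) bullet lines, indenting the whole block.
    splitted_block = stripped_block.split("\n")
    return textwrap.indent(
        splitted_block[0] + "\n\n" + "\n".join(splitted_block[1:]),
        base_indent_str,
    )

block = "where:\n- `y_i` is the true label.\n- `s_i` is the predicted score."
print(split_where_block(block))
```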
keras_rs/src/version.py CHANGED

{keras_rs_nightly-0.0.1.dev2025042803.dist-info → keras_rs_nightly-0.0.1.dev2025043003.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: keras-rs-nightly
-Version: 0.0.1.dev2025042803
+Version: 0.0.1.dev2025043003
 Summary: Multi-backend recommender systems with Keras 3.
 Author-email: Keras team <keras-users@googlegroups.com>
 License: Apache License 2.0
@@ -36,6 +36,80 @@ This library is an extension of the core Keras API; all high-level modules
 receive that same level of polish as core Keras. If you are familiar with Keras,
 congratulations! You already understand most of Keras Recommenders.
 
+## Quick Links
+
+- [Home page](https://keras.io/keras_rs)
+- [Examples](https://keras.io/keras_rs/examples)
+- [API documentation](https://keras.io/keras_rs/api)
+
+## Quickstart
+
+### Train your own cross network
+
+Choose a backend:
+
+```python
+import os
+os.environ["KERAS_BACKEND"] = "jax"  # Or "tensorflow" or "torch"!
+```
+
+Import KerasRS and other libraries:
+
+```python
+import keras
+import keras_rs
+import numpy as np
+```
+
+Define a simple model using the `FeatureCross` layer:
+
+```python
+vocabulary_size = 32
+embedding_dim = 6
+
+inputs = keras.Input(shape=(), name='indices', dtype="int32")
+x0 = keras.layers.Embedding(
+    input_dim=vocabulary_size,
+    output_dim=embedding_dim
+)(inputs)
+x1 = keras_rs.layers.FeatureCross()(x0, x0)
+x2 = keras_rs.layers.FeatureCross()(x0, x1)
+output = keras.layers.Dense(units=10)(x2)
+model = keras.Model(inputs, output)
+```
+
+Compile the model:
+
+```python
+model.compile(
+    loss=keras.losses.MeanSquaredError(),
+    optimizer=keras.optimizers.Adam(learning_rate=3e-4)
+)
+```
+
+Call `model.fit()` on dummy data:
+
+```python
+batch_size = 2
+x = np.random.randint(0, vocabulary_size, size=(batch_size,))
+y = np.random.random(size=(batch_size,))
+model.fit(x, y=y)
+```
+
+### Use ranking losses and metrics
+
+If your task is to rank items in a list, you can make use of the ranking losses
+and metrics which KerasRS provides. Below, we use the pairwise hinge loss and
+track the nDCG metric:
+
+```python
+model.compile(
+    loss=keras_rs.losses.PairwiseHingeLoss(),
+    metrics=[keras_rs.metrics.NDCG()],
+    optimizer=keras.optimizers.Adam(learning_rate=3e-4),
+)
+```
+
 ## Installation
 
 Keras Recommenders is available on PyPI as `keras-rs`:
keras_rs_nightly-0.0.1.dev2025043003.dist-info/RECORD ADDED

@@ -0,0 +1,42 @@
+keras_rs/__init__.py,sha256=8sjHiPN2GhUqAq4V7Vh4FLLqYw20-jgdI26ZKX5sg6M,350
+keras_rs/layers/__init__.py,sha256=cvrFgFWg0RjI0ExUZOKZRdcN-FwTIkqhT33Vx8wGtjQ,905
+keras_rs/losses/__init__.py,sha256=m04QOgxIUfJ2MvCUKLgEof-UbSNKgUYLPnY-D9NAclI,573
+keras_rs/metrics/__init__.py,sha256=Qxpf6OFooIL9TIn2l3WgOea3HFRG0hq02glPAxtMZ9c,580
+keras_rs/src/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+keras_rs/src/api_export.py,sha256=RsmG-DvO-cdFeAF9W6LRzms0kvtm-Yp9BAA_d-952zI,510
+keras_rs/src/types.py,sha256=UyOdgjqrqg_b58opnY8n6gTiDHKVR8z_bmEruehERBk,514
+keras_rs/src/version.py,sha256=6DQicfo43WsR2bsg-BdUHiGbBwGhNMF6hKd7NXYSW70,222
+keras_rs/src/layers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+keras_rs/src/layers/feature_interaction/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+keras_rs/src/layers/feature_interaction/dot_interaction.py,sha256=bRLz03_8VaYLNG4gbIKCzsSc26shKMmzmwCs8SujezE,8542
+keras_rs/src/layers/feature_interaction/feature_cross.py,sha256=rViVlJOGYG2f-uKTDQH7MdX2syRzIMkYYtAQUjz6F-0,8755
+keras_rs/src/layers/retrieval/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+keras_rs/src/layers/retrieval/brute_force_retrieval.py,sha256=izdppBXxJH0KqYEg7Zsr-SL-SHgAmnFopXMPalEO3uw,5676
+keras_rs/src/layers/retrieval/hard_negative_mining.py,sha256=n5UftRcuuR7Lh75vOdFdqatpsYqJDHCsraNtAjeWvoM,3575
+keras_rs/src/layers/retrieval/remove_accidental_hits.py,sha256=WKoIhUSc6SvbgLXcSqNvFUnkuyXfxWwsC7nAgYbON_U,3773
+keras_rs/src/layers/retrieval/retrieval.py,sha256=hVOBF10SF2q_TgJdVUqztbnw5qQF-cxVRGdJbOKoL9M,4191
+keras_rs/src/layers/retrieval/sampling_probability_correction.py,sha256=3zD6LInxhyIvyujMleGqiuoPKsna2oaTN6JU6xMnW_M,1977
+keras_rs/src/losses/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+keras_rs/src/losses/pairwise_hinge_loss.py,sha256=tONOJpcwCw1mybwvyx8dAy5t6dDmlIn00enzWfQLXpQ,3049
+keras_rs/src/losses/pairwise_logistic_loss.py,sha256=40PFdCHDM7CLunT_PE3RbgxROVImw13dgVL3o3nzeNg,3473
+keras_rs/src/losses/pairwise_loss.py,sha256=1eux_u7PZ8BkAVdoZnt8nQxJuJeTQy_FJ8IspN5SsPc,6210
+keras_rs/src/losses/pairwise_loss_utils.py,sha256=xvdGvdKNkvGvIaWYEQziWTFNa5EJz7rdkVGgrsnDHUk,1246
+keras_rs/src/losses/pairwise_mean_squared_error.py,sha256=zFiSr2TNyJysgULxj9R_trpIMRNL_4MqpiAMNPUYmR0,4855
+keras_rs/src/losses/pairwise_soft_zero_one_loss.py,sha256=YddVtJS8tKEeb0YrqGzEsr-6IDxH4uRjFrYkZDMWpkk,3492
+keras_rs/src/metrics/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+keras_rs/src/metrics/dcg.py,sha256=UT5EyStuMeF7kpVguF34u7__Hr0bWfSFqEoyX1F4dtA,5836
+keras_rs/src/metrics/mean_average_precision.py,sha256=fRptyVvhCtzg0rXhBaTfLmqo7dKIG7vS75HK0xuDvpg,4629
+keras_rs/src/metrics/mean_reciprocal_rank.py,sha256=R_LDAuKLK9buSD6hh3_nm0PksMhISbpuI6fR1MTsFWM,4034
+keras_rs/src/metrics/ndcg.py,sha256=OX8vqO5JoBm8I7NDOce0bXwtoGNEK0hGEQT8hYfqJDA,6935
+keras_rs/src/metrics/precision_at_k.py,sha256=A1pL5-Yo_DfDzUqAfqbF8TY39yqFgf_Fe1cxz0AsCfE,4029
+keras_rs/src/metrics/ranking_metric.py,sha256=GFtOszaDmP4Q1ky3KnyMNXR6OBu09Uk4aEOJyn5-JO4,10439
+keras_rs/src/metrics/ranking_metrics_utils.py,sha256=989J8pr6FRsA1HwBeF7SA8uQqjZT2XeCxKfRuMysWnQ,8828
+keras_rs/src/metrics/recall_at_k.py,sha256=allUQA6JvPcWXxtGUHXmZ_nOWHAOmuUrIy5s5Nxse-4,3695
+keras_rs/src/metrics/utils.py,sha256=6xanTNdwARn4ugzmb7ko2kwAhNhsnR4NhrpS_qW0IKc,2506
+keras_rs/src/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+keras_rs/src/utils/doc_string_utils.py,sha256=CmqomepmaYcvpACpXEXkrJb8DMnvIgmYK-lJ53lYarY,1675
+keras_rs/src/utils/keras_utils.py,sha256=d28OdQP4GrJk4NIQS4n0KPtCbgOCxVU_vDnnI7ODpOw,1562
+keras_rs_nightly-0.0.1.dev2025043003.dist-info/METADATA,sha256=9RvG8sYrJD060w9nUrJ_vIVKwx_M3CzH_f0dquulVjg,5199
+keras_rs_nightly-0.0.1.dev2025043003.dist-info/WHEEL,sha256=ooBFpIzZCPdw3uqIQsOo4qqbA4ZRPxHnOH7peeONza0,91
+keras_rs_nightly-0.0.1.dev2025043003.dist-info/top_level.txt,sha256=pWs8X78Z0cn6lfcIb9VYOW5UeJ-TpoaO9dByzo7_FFo,9
+keras_rs_nightly-0.0.1.dev2025043003.dist-info/RECORD,,
keras_rs_nightly-0.0.1.dev2025042803.dist-info/RECORD DELETED

@@ -1,42 +0,0 @@
-keras_rs/__init__.py,sha256=8sjHiPN2GhUqAq4V7Vh4FLLqYw20-jgdI26ZKX5sg6M,350
-keras_rs/layers/__init__.py,sha256=cvrFgFWg0RjI0ExUZOKZRdcN-FwTIkqhT33Vx8wGtjQ,905
-keras_rs/losses/__init__.py,sha256=m04QOgxIUfJ2MvCUKLgEof-UbSNKgUYLPnY-D9NAclI,573
-keras_rs/metrics/__init__.py,sha256=Qxpf6OFooIL9TIn2l3WgOea3HFRG0hq02glPAxtMZ9c,580
-keras_rs/src/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-keras_rs/src/api_export.py,sha256=RsmG-DvO-cdFeAF9W6LRzms0kvtm-Yp9BAA_d-952zI,510
-keras_rs/src/types.py,sha256=UyOdgjqrqg_b58opnY8n6gTiDHKVR8z_bmEruehERBk,514
-keras_rs/src/version.py,sha256=7yE4X2uVxePL-l4daVZqTqrjFfoFLjBVYZA4sczaeoM,222
-keras_rs/src/layers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-keras_rs/src/layers/feature_interaction/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-keras_rs/src/layers/feature_interaction/dot_interaction.py,sha256=jGHcg0EiWxth6LTxG2yWgHcyx_GXrxvA61uQqpPfnDQ,6900
-keras_rs/src/layers/feature_interaction/feature_cross.py,sha256=5OCSI0vFYzJNmgkKcuHIbVv8U2q3UvS80-qZjPimDjM,8155
-keras_rs/src/layers/retrieval/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-keras_rs/src/layers/retrieval/brute_force_retrieval.py,sha256=izdppBXxJH0KqYEg7Zsr-SL-SHgAmnFopXMPalEO3uw,5676
-keras_rs/src/layers/retrieval/hard_negative_mining.py,sha256=IWFrbw1h9z3AUw4oUBKf5_Aud4MTHO_AKdHfoyFa5As,3031
-keras_rs/src/layers/retrieval/remove_accidental_hits.py,sha256=Z84z2YgKspKeNdc5id8lf9TAyFsbCCz3acJxiKXYipc,3324
-keras_rs/src/layers/retrieval/retrieval.py,sha256=hVOBF10SF2q_TgJdVUqztbnw5qQF-cxVRGdJbOKoL9M,4191
-keras_rs/src/layers/retrieval/sampling_probability_correction.py,sha256=80vgOPfBiF-PC0dSyqS57IcIxOxi_Q_R7eSXHn1G0yI,1437
-keras_rs/src/losses/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-keras_rs/src/losses/pairwise_hinge_loss.py,sha256=nrIU0d1IcCAGo7RVxNitkldJhY2ZrXxjTV7Po27FXds,950
-keras_rs/src/losses/pairwise_logistic_loss.py,sha256=2dTtRmrNfvF_lOvHK0UQ518L2d4fkvQZDj30HWB5A2s,1305
-keras_rs/src/losses/pairwise_loss.py,sha256=rmDr_Qc3yA0CR8rUCCGjOgdbjYfC505BLNuITyb1n8k,6132
-keras_rs/src/losses/pairwise_loss_utils.py,sha256=xvdGvdKNkvGvIaWYEQziWTFNa5EJz7rdkVGgrsnDHUk,1246
-keras_rs/src/losses/pairwise_mean_squared_error.py,sha256=KhSRvjg4RpwhASP1Sl7PZoq2488P_uGDr9tZWzZhDVU,2764
-keras_rs/src/losses/pairwise_soft_zero_one_loss.py,sha256=QdWn-lyWQM-U9ID9xGQ7oK10q9XT6qd1gxVAKy8hZH4,1239
-keras_rs/src/metrics/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-keras_rs/src/metrics/dcg.py,sha256=DzSBc9ZbgNavuHRt3wtVzdx4ouAaaqeYhd9NxQLPq0g,5120
-keras_rs/src/metrics/mean_average_precision.py,sha256=SF5NlhlyVL9L_YVkj_s_135f3-8hILVHRziSGafGyZI,3915
-keras_rs/src/metrics/mean_reciprocal_rank.py,sha256=4stq0MzyWNokMlol6BESDAMuoUFieDrFFc57ue94h4Y,3240
-keras_rs/src/metrics/ndcg.py,sha256=G7WNFoUaOhnf4vMF1jgcI4yGxieUfJv5E0upv4Qs1AQ,6545
-keras_rs/src/metrics/precision_at_k.py,sha256=u-mj49qamt448gxkOI9YIZMMrhgO8QmetRFXGGlWOqY,3247
-keras_rs/src/metrics/ranking_metric.py,sha256=cdFb4Lg2Z8P-02ImMGUAX4XeOUyzEE8TA6nB4fDgq0U,10411
-keras_rs/src/metrics/ranking_metrics_utils.py,sha256=989J8pr6FRsA1HwBeF7SA8uQqjZT2XeCxKfRuMysWnQ,8828
-keras_rs/src/metrics/recall_at_k.py,sha256=hlPnR5AtFjdd5AG0zLkLGVyLO5mWtp2bAu_cSOq9Fws,2919
-keras_rs/src/metrics/utils.py,sha256=6xanTNdwARn4ugzmb7ko2kwAhNhsnR4NhrpS_qW0IKc,2506
-keras_rs/src/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-keras_rs/src/utils/doc_string_utils.py,sha256=yVyQ8pYdl4gd4tKRhD8dXmQX1EwZeLiV3cCq3A1tUEk,1466
-keras_rs/src/utils/keras_utils.py,sha256=d28OdQP4GrJk4NIQS4n0KPtCbgOCxVU_vDnnI7ODpOw,1562
-keras_rs_nightly-0.0.1.dev2025042803.dist-info/METADATA,sha256=4W-DkQ0hKfcBFI6CAJzbJWAiNRGhLGFYrxLyGVo8GBM,3614
-keras_rs_nightly-0.0.1.dev2025042803.dist-info/WHEEL,sha256=ck4Vq1_RXyvS4Jt6SI0Vz6fyVs4GWg7AINwpsaGEgPE,91
-keras_rs_nightly-0.0.1.dev2025042803.dist-info/top_level.txt,sha256=pWs8X78Z0cn6lfcIb9VYOW5UeJ-TpoaO9dByzo7_FFo,9
-keras_rs_nightly-0.0.1.dev2025042803.dist-info/RECORD,,

{keras_rs_nightly-0.0.1.dev2025042803.dist-info → keras_rs_nightly-0.0.1.dev2025043003.dist-info}/top_level.txt: File without changes