keras-rs-nightly 0.0.1.dev2025042903.tar.gz → 0.0.1.dev2025043003.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025043003}/PKG-INFO +2 -2
- {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025043003}/README.md +1 -1
- {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/layers/retrieval/hard_negative_mining.py +3 -2
- {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/layers/retrieval/remove_accidental_hits.py +1 -1
- {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/layers/retrieval/sampling_probability_correction.py +2 -1
- {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/losses/pairwise_hinge_loss.py +14 -11
- {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/losses/pairwise_logistic_loss.py +15 -12
- {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/losses/pairwise_loss.py +11 -6
- {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/losses/pairwise_mean_squared_error.py +14 -11
- {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/losses/pairwise_soft_zero_one_loss.py +15 -12
- {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/metrics/dcg.py +11 -11
- {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/metrics/mean_average_precision.py +14 -17
- {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/metrics/mean_reciprocal_rank.py +2 -1
- {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/metrics/ndcg.py +21 -29
- {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/metrics/precision_at_k.py +2 -1
- {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/metrics/ranking_metric.py +0 -1
- {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/metrics/recall_at_k.py +2 -1
- {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/utils/doc_string_utils.py +6 -1
- {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/version.py +1 -1
- {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs_nightly.egg-info/PKG-INFO +2 -2
- {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/api/__init__.py +0 -0
- {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/api/layers/__init__.py +0 -0
- {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/api/losses/__init__.py +0 -0
- {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/api/metrics/__init__.py +0 -0
- {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/__init__.py +0 -0
- {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/api_export.py +0 -0
- {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/layers/__init__.py +0 -0
- {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/layers/feature_interaction/__init__.py +0 -0
- {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/layers/feature_interaction/dot_interaction.py +0 -0
- {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/layers/feature_interaction/feature_cross.py +0 -0
- {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/layers/retrieval/__init__.py +0 -0
- {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/layers/retrieval/brute_force_retrieval.py +0 -0
- {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/layers/retrieval/retrieval.py +0 -0
- {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/losses/__init__.py +0 -0
- {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/losses/pairwise_loss_utils.py +0 -0
- {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/metrics/__init__.py +0 -0
- {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/metrics/ranking_metrics_utils.py +0 -0
- {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/metrics/utils.py +0 -0
- {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/types.py +0 -0
- {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/utils/__init__.py +0 -0
- {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/utils/keras_utils.py +0 -0
- {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs_nightly.egg-info/SOURCES.txt +0 -0
- {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs_nightly.egg-info/dependency_links.txt +0 -0
- {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs_nightly.egg-info/requires.txt +0 -0
- {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs_nightly.egg-info/top_level.txt +0 -0
- {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025043003}/pyproject.toml +0 -0
- {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025043003}/setup.cfg +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: keras-rs-nightly
-Version: 0.0.1.dev2025042903
+Version: 0.0.1.dev2025043003
 Summary: Multi-backend recommender systems with Keras 3.
 Author-email: Keras team <keras-users@googlegroups.com>
 License: Apache License 2.0
@@ -93,7 +93,7 @@ Call `model.fit()` on dummy data:
 batch_size = 2
 x = np.random.randint(0, vocabulary_size, size=(batch_size,))
 y = np.random.random(size=(batch_size,))
-model.fit(
+model.fit(x, y=y)
 ```
 
 ### Use ranking losses and metrics
keras_rs/src/layers/retrieval/hard_negative_mining.py
@@ -57,8 +57,9 @@ class HardNegativeMining(keras.layers.Layer):
         Returns:
             A tuple containing two tensors with the last dimension of
             `num_candidates` replaced with `num_hard_negatives + 1`.
-
-
+
+            * logits: `[..., num_hard_negatives + 1]` tensor of logits.
+            * labels: `[..., num_hard_negatives + 1]` one-hot tensor of labels.
         """
 
         # Number of sampled logits, i.e, the number of hard negatives to be
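The restored docstring describes the layer's output; the underlying technique, keeping the positive candidate plus the `num_hard_negatives` highest-scoring negatives, can be sketched in a few lines of NumPy. This is an illustration of the idea, not the layer's actual implementation, and the function name is made up:

```python
import numpy as np

def mine_hard_negatives(logits, labels, num_hard_negatives):
    """Keep the positive plus the top-scoring negatives per row (sketch)."""
    # Push the positive's logit to -inf so it cannot be picked as a negative.
    masked = np.where(labels > 0, -np.inf, logits)
    # Indices of the `num_hard_negatives` highest-scoring negatives.
    neg_idx = np.argsort(-masked, axis=-1)[..., :num_hard_negatives]
    pos_idx = np.argmax(labels, axis=-1, keepdims=True)
    idx = np.concatenate([pos_idx, neg_idx], axis=-1)
    # The last dimension shrinks to `num_hard_negatives + 1`.
    return (np.take_along_axis(logits, idx, axis=-1),
            np.take_along_axis(labels, idx, axis=-1))

logits = np.array([[0.1, 2.0, -1.0, 0.5]])
labels = np.array([[1.0, 0.0, 0.0, 0.0]])
print(mine_hard_negatives(logits, labels, num_hard_negatives=2))
# logits -> [[0.1, 2.0, 0.5]], labels -> [[1.0, 0.0, 0.0]]
```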
keras_rs/src/layers/retrieval/remove_accidental_hits.py
@@ -51,7 +51,7 @@ class RemoveAccidentalHits(keras.layers.Layer):
             `labels`.
 
         Returns:
-
+            The modified logits with the same shape as the input logits.
         """
         # A more principled way is to implement
         # `softmax_cross_entropy_with_logits` with a input mask. Here we
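As the inline comment notes, the layer approximates an input-masked softmax cross-entropy by instead pushing the logits of accidental hits (sampled candidates that share the positive's id) toward negative infinity. A minimal NumPy sketch of that masking, with illustrative identifier names:

```python
import numpy as np

PADDING_LOGIT = -1e9  # large negative value; softmax weight becomes ~0

def remove_accidental_hits(labels, logits, candidate_ids):
    """Mask sampled candidates that duplicate the positive item (sketch)."""
    # labels: (batch, n) one-hot, logits: (batch, n), candidate_ids: (n,)
    positive_ids = candidate_ids[np.argmax(labels, axis=-1)]      # (batch,)
    duplicates = candidate_ids[None, :] == positive_ids[:, None]  # (batch, n)
    accidental_hits = duplicates & (labels == 0.0)
    return np.where(accidental_hits, logits + PADDING_LOGIT, logits)

labels = np.array([[1.0, 0.0, 0.0]])
logits = np.array([[2.0, 1.0, 0.5]])
candidate_ids = np.array([7, 7, 3])  # candidate 1 duplicates the positive id 7
print(remove_accidental_hits(labels, logits, candidate_ids))
# [[ 2.0  ~-1e+09  0.5]]
```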
keras_rs/src/layers/retrieval/sampling_probability_correction.py
@@ -50,7 +50,8 @@ class SamplingProbabilityCorrection(keras.layers.Layer):
             candidate_sampling_probability: The sampling probability with the
                 same shape as `logits`.
 
-        Returns:
+        Returns:
+            The corrected logits with the same shape as the input logits.
         """
         return logits - ops.log(
             ops.clip(candidate_sampling_probability, self.epsilon, 1.0)
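The correction itself is visible in the hunk: subtract the log of the (clipped) sampling probability from each logit. A standalone NumPy equivalent; the epsilon default here is illustrative:

```python
import numpy as np

def correct_logits(logits, candidate_sampling_probability, epsilon=1e-6):
    """logQ correction: logits - log(p), with p clipped away from zero."""
    p = np.clip(candidate_sampling_probability, epsilon, 1.0)
    return logits - np.log(p)

# A rare candidate (p=0.01) gets a larger boost than a frequent one (p=0.5).
print(correct_logits(np.array([1.0, 1.0]), np.array([0.5, 0.01])))
# [1.6931... 5.6051...]
```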
keras_rs/src/losses/pairwise_hinge_loss.py
@@ -14,13 +14,13 @@ class PairwiseHingeLoss(PairwiseLoss):
 
 formula = "loss = sum_{i} sum_{j} I(y_i > y_j) * max(0, 1 - (s_i - s_j))"
 explanation = """
-
-
-
+- `max(0, 1 - (s_i - s_j))` is the hinge loss, which penalizes cases where
+  the score difference `s_i - s_j` is not sufficiently large when
+  `y_i > y_j`.
 """
 extra_args = ""
 example = """
-
+With `compile()` API:
 
 ```python
 model.compile(
@@ -29,23 +29,24 @@ example = """
 )
 ```
 
-
-
+As a standalone function with unbatched inputs:
+
 >>> y_true = np.array([1.0, 0.0, 1.0, 3.0, 2.0])
 >>> y_pred = np.array([1.0, 3.0, 2.0, 4.0, 0.8])
 >>> pairwise_hinge_loss = keras_rs.losses.PairwiseHingeLoss()
 >>> pairwise_hinge_loss(y_true=y_true, y_pred=y_pred)
 2.32000
 
-
-
+With batched inputs using default 'auto'/'sum_over_batch_size' reduction:
+
 >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
 >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
 >>> pairwise_hinge_loss = keras_rs.losses.PairwiseHingeLoss()
 >>> pairwise_hinge_loss(y_true=y_true, y_pred=y_pred)
 0.75
 
-
+With masked inputs (useful for ragged inputs):
+
 >>> y_true = {
 ...     "labels": np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]]),
 ...     "mask": np.array(
@@ -56,7 +57,8 @@ example = """
 >>> pairwise_hinge_loss(y_true=y_true, y_pred=y_pred)
 0.64999
 
-
+With `sample_weight`:
+
 >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
 >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
 >>> sample_weight = np.array(
@@ -68,7 +70,8 @@ example = """
 ... )
 1.02499
 
-
+Using `'none'` reduction:
+
 >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
 >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
 >>> pairwise_hinge_loss = keras_rs.losses.PairwiseHingeLoss(
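To make the formula concrete, here is a minimal NumPy sketch of the unbatched case, reconstructed from the formula string above rather than taken from the package. With sum-over-list-size reduction it reproduces the 2.32 value from the docstring example:

```python
import numpy as np

def pairwise_hinge_loss(y_true, y_pred):
    """loss = sum_{i} sum_{j} I(y_i > y_j) * max(0, 1 - (s_i - s_j))."""
    s_diff = y_pred[:, None] - y_pred[None, :]      # s_i - s_j
    indicator = y_true[:, None] > y_true[None, :]   # I(y_i > y_j)
    pair_losses = np.maximum(0.0, 1.0 - s_diff) * indicator
    # "sum_over_batch_size" on an unbatched list: divide by the list size.
    return pair_losses.sum() / len(y_true)

y_true = np.array([1.0, 0.0, 1.0, 3.0, 2.0])
y_pred = np.array([1.0, 3.0, 2.0, 4.0, 0.8])
print(pairwise_hinge_loss(y_true, y_pred))  # 2.32
```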
keras_rs/src/losses/pairwise_logistic_loss.py
@@ -22,14 +22,14 @@ class PairwiseLogisticLoss(PairwiseLoss):
 
 formula = "loss = sum_{i} sum_{j} I(y_i > y_j) * log(1 + exp(-(s_i - s_j)))"
 explanation = """
-
-
-
-
+- `log(1 + exp(-(s_i - s_j)))` is the logistic loss, which penalizes
+  cases where the score difference `s_i - s_j` is not sufficiently large
+  when `y_i > y_j`. This function provides a smooth approximation of the
+  ideal step function, making it suitable for gradient-based optimization.
 """
 extra_args = ""
 example = """
-
+With `compile()` API:
 
 ```python
 model.compile(
@@ -38,23 +38,24 @@ example = """
 )
 ```
 
-
-
+As a standalone function with unbatched inputs:
+
 >>> y_true = np.array([1.0, 0.0, 1.0, 3.0, 2.0])
 >>> y_pred = np.array([1.0, 3.0, 2.0, 4.0, 0.8])
 >>> pairwise_logistic_loss = keras_rs.losses.PairwiseLogisticLoss()
 >>> pairwise_logistic_loss(y_true=y_true, y_pred=y_pred)
 >>> 1.70708
 
-
-
+With batched inputs using default 'auto'/'sum_over_batch_size' reduction:
+
 >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
 >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
 >>> pairwise_logistic_loss = keras_rs.losses.PairwiseLogisticLoss()
 >>> pairwise_logistic_loss(y_true=y_true, y_pred=y_pred)
 0.73936
 
-
+With masked inputs (useful for ragged inputs):
+
 >>> y_true = {
 ...     "labels": np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]]),
 ...     "mask": np.array(
@@ -65,7 +66,8 @@ example = """
 >>> pairwise_logistic_loss(y_true=y_true, y_pred=y_pred)
 0.53751
 
-
+With `sample_weight`:
+
 >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
 >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
 >>> sample_weight = np.array(
@@ -77,7 +79,8 @@ example = """
 ... )
 >>> 0.80337
 
-
+Using `'none'` reduction:
+
 >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
 >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
 >>> pairwise_logistic_loss = keras_rs.losses.PairwiseLogisticLoss(
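The same sketch with the hinge term swapped for the logistic term reproduces the 1.70708 value; `np.logaddexp(0, -x)` computes `log(1 + exp(-x))` without overflow:

```python
import numpy as np

def pairwise_logistic_loss(y_true, y_pred):
    s_diff = y_pred[:, None] - y_pred[None, :]
    indicator = y_true[:, None] > y_true[None, :]
    pair_losses = np.logaddexp(0.0, -s_diff) * indicator
    return pair_losses.sum() / len(y_true)

print(pairwise_logistic_loss(np.array([1.0, 0.0, 1.0, 3.0, 2.0]),
                             np.array([1.0, 3.0, 2.0, 4.0, 0.8])))  # ~1.70708
```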
keras_rs/src/losses/pairwise_loss.py
@@ -70,7 +70,8 @@ class PairwiseLoss(keras.losses.Loss, abc.ABC):
         y_true: types.Tensor,
         y_pred: types.Tensor,
     ) -> types.Tensor:
-        """
+        """Compute the pairwise loss.
+
         Args:
             y_true: tensor or dict. Ground truth values. If tensor, of shape
                 `(list_size)` for unbatched inputs or `(batch_size, list_size)`
@@ -83,6 +84,9 @@ class PairwiseLoss(keras.losses.Loss, abc.ABC):
             y_pred: tensor. The predicted values, of shape `(list_size)` for
                 unbatched inputs or `(batch_size, list_size)` for batched
                 inputs. Should be of the same shape as `y_true`.
+
+        Returns:
+            The loss.
         """
         mask = None
         if isinstance(y_true, dict):
@@ -134,11 +138,12 @@ pairwise_loss_subclass_doc_string = (
 ```
 
 where:
-
-
-
-
-
+
+- `y_i` and `y_j` are the true labels of items `i` and `j`, respectively.
+- `s_i` and `s_j` are the predicted scores of items `i` and `j`,
+  respectively.
+- `I(y_i > y_j)` is an indicator function that equals 1 if `y_i > y_j`,
+  and 0 otherwise.{explanation}
 Args:{extra_args}
     reduction: Type of reduction to apply to the loss. In almost all cases
         this should be `"sum_over_batch_size"`. Supported options are
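The dict form of `y_true` pairs the labels with a validity mask. One way such a mask can drop padded items from the pair loss, sketched for the unbatched hinge case; this is a reconstruction of the general idea, not the package's exact masking or reduction semantics:

```python
import numpy as np

def masked_pairwise_hinge(y_true, y_pred):
    labels, mask = y_true["labels"], y_true["mask"]
    s_diff = y_pred[:, None] - y_pred[None, :]
    valid_pair = mask[:, None] & mask[None, :]     # both items must be valid
    indicator = (labels[:, None] > labels[None, :]) & valid_pair
    pair_losses = np.maximum(0.0, 1.0 - s_diff) * indicator
    return pair_losses.sum() / mask.sum()          # average over valid items

y_true = {"labels": np.array([1.0, 0.0, 3.0, 0.0]),
          "mask": np.array([True, True, True, False])}  # last item is padding
y_pred = np.array([1.0, 3.0, 2.0, 9.9])                 # padded score ignored
print(masked_pairwise_hinge(y_true, y_pred))
```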
keras_rs/src/losses/pairwise_mean_squared_error.py
@@ -59,13 +59,13 @@ class PairwiseMeanSquaredError(PairwiseLoss):
 
 formula = "loss = sum_{i} sum_{j} I(y_i > y_j) * (s_i - s_j)^2"
 explanation = """
-
-
-
+- `(s_i - s_j)^2` is the squared difference between the predicted scores
+  of items `i` and `j`, which penalizes discrepancies between the predicted
+  order of items relative to their true order.
 """
 extra_args = ""
 example = """
-
+With `compile()` API:
 
 ```python
 model.compile(
@@ -74,23 +74,24 @@ example = """
 )
 ```
 
-
-
+As a standalone function with unbatched inputs:
+
 >>> y_true = np.array([1.0, 0.0, 1.0, 3.0, 2.0])
 >>> y_pred = np.array([1.0, 3.0, 2.0, 4.0, 0.8])
 >>> pairwise_mse = keras_rs.losses.PairwiseMeanSquaredError()
 >>> pairwise_mse(y_true=y_true, y_pred=y_pred)
 >>> 19.10400
 
-
-
+With batched inputs using default 'auto'/'sum_over_batch_size' reduction:
+
 >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
 >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
 >>> pairwise_mse = keras_rs.losses.PairwiseMeanSquaredError()
 >>> pairwise_mse(y_true=y_true, y_pred=y_pred)
 5.57999
 
-
+With masked inputs (useful for ragged inputs):
+
 >>> y_true = {
 ...     "labels": np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]]),
 ...     "mask": np.array(
@@ -101,7 +102,8 @@ example = """
 >>> pairwise_mse(y_true=y_true, y_pred=y_pred)
 4.76000
 
-
+With `sample_weight`:
+
 >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
 >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
 >>> sample_weight = np.array(
@@ -113,7 +115,8 @@ example = """
 ... )
 11.0500
 
-
+Using `'none'` reduction:
+
 >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
 >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
 >>> pairwise_mse = keras_rs.losses.PairwiseMeanSquaredError(
keras_rs/src/losses/pairwise_soft_zero_one_loss.py
@@ -18,14 +18,14 @@ class PairwiseSoftZeroOneLoss(PairwiseLoss):
 
 formula = "loss = sum_{i} sum_{j} I(y_i > y_j) * (1 - sigmoid(s_i - s_j))"
 explanation = """
-
-
-
-
+- `(1 - sigmoid(s_i - s_j))` represents the soft zero-one loss, which
+  approximates the ideal zero-one loss (which would be 1 if `s_i < s_j`
+  and 0 otherwise) with a smooth, differentiable function. This makes it
+  suitable for gradient-based optimization.
 """
 extra_args = ""
 example = """
-
+With `compile()` API:
 
 ```python
 model.compile(
@@ -34,23 +34,24 @@ example = """
 )
 ```
 
-
-
+As a standalone function with unbatched inputs:
+
 >>> y_true = np.array([1.0, 0.0, 1.0, 3.0, 2.0])
 >>> y_pred = np.array([1.0, 3.0, 2.0, 4.0, 0.8])
 >>> pairwise_soft_zero_one_loss = keras_rs.losses.PairwiseSoftZeroOneLoss()
 >>> pairwise_soft_zero_one_loss(y_true=y_true, y_pred=y_pred)
 0.86103
 
-
-
+With batched inputs using default 'auto'/'sum_over_batch_size' reduction:
+
 >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
 >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
 >>> pairwise_soft_zero_one_loss = keras_rs.losses.PairwiseSoftZeroOneLoss()
 >>> pairwise_soft_zero_one_loss(y_true=y_true, y_pred=y_pred)
 0.46202
 
-
+With masked inputs (useful for ragged inputs):
+
 >>> y_true = {
 ...     "labels": np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]]),
 ...     "mask": np.array(
@@ -61,7 +62,8 @@ example = """
 >>> pairwise_soft_zero_one_loss(y_true=y_true, y_pred=y_pred)
 0.29468
 
-
+With `sample_weight`:
+
 >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
 >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
 >>> sample_weight = np.array(
@@ -73,7 +75,8 @@ example = """
 ... )
 0.40478
 
-
+Using `'none'` reduction:
+
 >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
 >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
 >>> pairwise_soft_zero_one_loss = keras_rs.losses.PairwiseSoftZeroOneLoss(
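And the soft zero-one variant of the same sketch, matching the unbatched example value:

```python
import numpy as np

def pairwise_soft_zero_one_loss(y_true, y_pred):
    s_diff = y_pred[:, None] - y_pred[None, :]
    indicator = y_true[:, None] > y_true[None, :]
    sigmoid = 1.0 / (1.0 + np.exp(-s_diff))
    return ((1.0 - sigmoid) * indicator).sum() / len(y_true)

print(pairwise_soft_zero_one_loss(np.array([1.0, 0.0, 1.0, 3.0, 2.0]),
                                  np.array([1.0, 3.0, 2.0, 4.0, 0.8])))  # ~0.86103
```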
keras_rs/src/metrics/dcg.py
@@ -111,16 +111,15 @@ DCG@k(y', w') = sum_{i=1}^{k} (gain_fn(y'_i) / rank_discount_fn(i))
 ```
 
 where:
-
-
-
-
-
-
-
-
-
-"""
+- `y'_i` is the true relevance score of the item ranked at position `i`
+  (obtained by sorting `y_true` according to `y_pred`).
+- `gain_fn` is the user-provided function mapping relevance `y'_i` to a
+  gain value. The default function (`default_gain_fn`) is typically
+  equivalent to `lambda y: 2**y - 1`.
+- `rank_discount_fn` is the user-provided function mapping rank `i`
+  to a discount value. The default function (`default_rank_discount_fn`)
+  is typically equivalent to `lambda rank: 1 / log2(rank + 1)`.
+- The final result aggregates these per-list scores."""
 extra_args = """
     gain_fn: callable. Maps relevance scores (`y_true`) to gain values. The
         default implements `2**y - 1`.
@@ -136,7 +135,8 @@ example = """
 ...     y_true=labels, y_pred=scores
 ... )
 
-
+Mask certain elements (can be used for uneven inputs):
+
 >>> batch_size = 2
 >>> list_size = 5
 >>> labels = np.random.randint(0, 3, size=(batch_size, list_size))
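A compact NumPy sketch of DCG with the default gain and discount functions described above (a reconstruction from the formula, not the metric's implementation):

```python
import numpy as np

def dcg(y_true, y_pred, k=None):
    """DCG@k with gain 2**y - 1 and discount 1 / log2(rank + 1)."""
    order = np.argsort(-y_pred)          # rank items by predicted score
    ranked_rel = y_true[order][:k]       # y'_i in the formula
    ranks = np.arange(1, len(ranked_rel) + 1)
    return np.sum((2.0 ** ranked_rel - 1.0) / np.log2(ranks + 1.0))

y_true = np.array([1.0, 0.0, 2.0])
y_pred = np.array([0.1, 0.9, 0.2])
print(dcg(y_true, y_pred))  # ranked relevances [0, 2, 1] -> ~2.3928
```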
keras_rs/src/metrics/mean_average_precision.py
@@ -82,22 +82,18 @@ rel(j) = y_i if rank(s_i) = j
 ```
 
 where:
-
-
-
-
-
-
-
-
-
-
-
-
-    score `s_i`.
-- `sum_i y_i` calculates the total number of relevant items in the
-  original list `y`.
-"""
+- `j` represents the rank position (starting from 1).
+- `sum_j` indicates a summation over all ranks `j` from 1 up to the list
+  size (or `k`).
+- `P@j(y, s)` denotes the Precision at rank `j`, calculated as the
+  number of relevant items found within the top `j` positions divided by `j`.
+- `rel(j)` represents the relevance of the item specifically at rank
+  `j`. `rel(j)` is 1 if the item at rank `j` is relevant, and 0 otherwise.
+- `y_i` is the true relevance label of the original item `i` before ranking.
+- `rank(s_i)` is the rank position assigned to item `i` based on its score
+  `s_i`.
+- `sum_i y_i` calculates the total number of relevant items in the original
+  list `y`."""
 extra_args = ""
 example = """
 >>> batch_size = 2
@@ -108,7 +104,8 @@ example = """
 ...     y_true=labels, y_pred=scores
 ... )
 
-
+Mask certain elements (can be used for uneven inputs):
+
 >>> batch_size = 2
 >>> list_size = 5
 >>> labels = np.random.randint(0, 2, size=(batch_size, list_size))
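The per-list computation in NumPy terms, following the definitions above (an illustrative reconstruction for binary labels):

```python
import numpy as np

def average_precision(y_true, y_pred):
    """AP = sum_j P@j(y, s) * rel(j) / sum_i y_i, for binary labels."""
    order = np.argsort(-y_pred)
    rel = y_true[order]                      # rel(j), in rank order
    ranks = np.arange(1, len(rel) + 1)       # j = 1, 2, ...
    precision_at_j = np.cumsum(rel) / ranks  # P@j(y, s)
    return np.sum(precision_at_j * rel) / rel.sum()

y_true = np.array([0.0, 1.0, 1.0])
y_pred = np.array([0.9, 0.8, 0.1])
print(average_precision(y_true, y_pred))  # (1/2 + 2/3) / 2 = 0.5833...
```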
keras_rs/src/metrics/mean_reciprocal_rank.py
@@ -95,7 +95,8 @@ example = """
 ...     y_true=labels, y_pred=scores
 ... )
 
-
+Mask certain elements (can be used for uneven inputs):
+
 >>> batch_size = 2
 >>> list_size = 5
 >>> labels = np.random.randint(0, 2, size=(batch_size, list_size))
keras_rs/src/metrics/ndcg.py
@@ -109,7 +109,7 @@ class NDCG(RankingMetric):
 
 concept_sentence = (
     "It normalizes the Discounted Cumulative Gain (DCG) with the Ideal "
-    "Discounted Cumulative Gain (IDCG) for each list
+    "Discounted Cumulative Gain (IDCG) for each list"
 )
 relevance_type = (
     "graded relevance scores (non-negative numbers where higher values "
@@ -124,11 +124,6 @@ score_range_interpretation = (
 )
 
 formula = """
-The metric calculates a weighted average nDCG score per list.
-For a single list, nDCG is computed as the ratio of the Discounted
-Cumulative Gain (DCG) of the predicted ranking to the Ideal Discounted
-Cumulative Gain (IDCG) of the best possible ranking:
-
 ```
 nDCG@k = DCG@k / IDCG@k
 ```
@@ -147,29 +142,25 @@ IDCG@k(y'') = sum_{i=1}^{k} (gain_fn(y''_i) / rank_discount_fn(i))
 ```
 
 where:
-
-
-
-
-
-
-
-
-
-
-
-
-
-    potentially involving normalization by list-specific weights, to
-    produce a weighted average.
-"""
+- `y'_i`: True relevance of the item at rank `i` in the ranking induced by
+  `y_pred`.
+- `y''_i` True relevance of the item at rank `i` in the *ideal* ranking (sorted
+  by `y_true` descending).
+- `gain_fn` is the user-provided function mapping relevance to gain. The default
+  function (`default_gain_fn`) is typically equivalent to `lambda y: 2**y - 1`.
+- `rank_discount_fn` is the user-provided function mapping rank `i` (1-based) to
+  a discount value. The default function (`default_rank_discount_fn`) is
+  typically equivalent to `lambda rank: 1 / log2(rank + 1)`.
+- If IDCG@k is 0 (e.g., no relevant items), nDCG@k is defined as 0.
+- The final result often aggregates these per-list nDCG scores, potentially
+  involving normalization by list-specific weights, to produce a weighted
+  average."""
 extra_args = """
-    gain_fn: callable. Maps relevance scores (`y_true`) to gain values. The
-
-    rank_discount_fn:
-
-
-"""
+    gain_fn: callable. Maps relevance scores (`y_true`) to gain values. The
+        default implements `2**y - 1`.
+    rank_discount_fn: function. Maps rank positions to discount
+        values. The default (`default_rank_discount_fn`) implements
+        `1 / log2(rank + 1)`."""
 example = """
 >>> batch_size = 2
 >>> list_size = 5
@@ -179,7 +170,8 @@ example = """
 ...     y_true=labels, y_pred=scores
 ... )
 
-
+Mask certain elements (can be used for uneven inputs):
+
 >>> batch_size = 2
 >>> list_size = 5
 >>> labels = np.random.randint(0, 3, size=(batch_size, list_size))
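Combining the pieces, a self-contained nDCG sketch under the same default gain and discount (again a reconstruction; the actual metric also handles weights and batching):

```python
import numpy as np

def dcg_at_k(rel, k=None):
    rel = rel[:k]
    return np.sum((2.0 ** rel - 1.0) / np.log2(np.arange(2, len(rel) + 2)))

def ndcg(y_true, y_pred, k=None):
    """nDCG@k = DCG@k / IDCG@k, defined as 0 when IDCG@k is 0."""
    dcg = dcg_at_k(y_true[np.argsort(-y_pred)], k)  # predicted ranking
    idcg = dcg_at_k(np.sort(y_true)[::-1], k)       # ideal ranking
    return dcg / idcg if idcg > 0 else 0.0

print(ndcg(np.array([1.0, 0.0, 2.0]), np.array([0.1, 0.9, 0.2])))  # ~0.659
```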
keras_rs/src/metrics/precision_at_k.py
@@ -91,7 +91,8 @@ example = """
 ...     y_true=labels, y_pred=scores
 ... )
 
-
+Mask certain elements (can be used for uneven inputs):
+
 >>> batch_size = 2
 >>> list_size = 5
 >>> labels = np.random.randint(0, 2, size=(batch_size, list_size))
keras_rs/src/metrics/ranking_metric.py
@@ -221,7 +221,6 @@ by sorting in descending order. {score_range_interpretation}.
 For each list of predicted scores `s` in `y_pred` and the corresponding list
 of true labels `y` in `y_true`, the per-query {metric_abbreviation} score is
 calculated as follows:
-
 {formula}
 
 The final {metric_abbreviation} score reported is typically the weighted
keras_rs/src/metrics/recall_at_k.py
@@ -82,7 +82,8 @@ example = """
 ...     y_true=labels, y_pred=scores
 ... )
 
-
+Mask certain elements (can be used for uneven inputs):
+
 >>> batch_size = 2
 >>> list_size = 5
 >>> labels = np.random.randint(0, 2, size=(batch_size, list_size))
keras_rs/src/utils/doc_string_utils.py
@@ -30,8 +30,13 @@ def format_docstring(template: str, width: int = 80, **kwargs: Any) -> str:
             textwrap.indent(formula_dedented, base_indent_str)
         )
     elif "where:" in stripped_block:
+        # Expect this to be already indented.
+        splitted_block = stripped_block.split("\n")
         processed_output.append(
-            textwrap.indent(
+            textwrap.indent(
+                splitted_block[0] + "\n\n" + "\n".join(splitted_block[1:]),
+                base_indent_str,
+            )
         )
     else:
         processed_output.append(
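In effect, the new branch re-inserts a blank line after the `where:` heading before indenting the block. A self-contained demonstration of that transformation; the inputs here are made up, and only the quoted expressions come from the diff:

```python
import textwrap

stripped_block = "where:\n- `y_i` is the true label.\n- `s_i` is the score."
base_indent_str = "    "

splitted_block = stripped_block.split("\n")
print(textwrap.indent(
    splitted_block[0] + "\n\n" + "\n".join(splitted_block[1:]),
    base_indent_str,
))
# Prints "where:" and the bullets indented by 4 spaces, with a blank line
# (left unindented, since textwrap.indent skips whitespace-only lines)
# separating the heading from the bullets.
```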
keras_rs_nightly.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: keras-rs-nightly
-Version: 0.0.1.dev2025042903
+Version: 0.0.1.dev2025043003
 Summary: Multi-backend recommender systems with Keras 3.
 Author-email: Keras team <keras-users@googlegroups.com>
 License: Apache License 2.0
@@ -93,7 +93,7 @@ Call `model.fit()` on dummy data:
 batch_size = 2
 x = np.random.randint(0, vocabulary_size, size=(batch_size,))
 y = np.random.random(size=(batch_size,))
-model.fit(
+model.fit(x, y=y)
 ```
 
 ### Use ranking losses and metrics
All remaining files listed above with `+0 -0` (including `keras_rs/src/types.py` and `pyproject.toml`) were carried over into the new version's directory prefix without content changes.