keras-rs-nightly 0.0.1.dev2025042903.tar.gz → 0.0.1.dev2025050103.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of keras-rs-nightly might be problematic.

Files changed (47)
  1. {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025050103}/PKG-INFO +2 -2
  2. {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025050103}/README.md +1 -1
  3. {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025050103}/keras_rs/src/layers/retrieval/hard_negative_mining.py +3 -2
  4. {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025050103}/keras_rs/src/layers/retrieval/remove_accidental_hits.py +1 -1
  5. {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025050103}/keras_rs/src/layers/retrieval/sampling_probability_correction.py +2 -1
  6. {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025050103}/keras_rs/src/losses/pairwise_hinge_loss.py +14 -11
  7. {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025050103}/keras_rs/src/losses/pairwise_logistic_loss.py +15 -12
  8. {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025050103}/keras_rs/src/losses/pairwise_loss.py +11 -6
  9. {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025050103}/keras_rs/src/losses/pairwise_mean_squared_error.py +14 -11
  10. {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025050103}/keras_rs/src/losses/pairwise_soft_zero_one_loss.py +15 -12
  11. {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025050103}/keras_rs/src/metrics/dcg.py +11 -11
  12. {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025050103}/keras_rs/src/metrics/mean_average_precision.py +15 -18
  13. {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025050103}/keras_rs/src/metrics/mean_reciprocal_rank.py +6 -5
  14. {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025050103}/keras_rs/src/metrics/ndcg.py +21 -29
  15. {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025050103}/keras_rs/src/metrics/precision_at_k.py +5 -4
  16. {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025050103}/keras_rs/src/metrics/ranking_metric.py +7 -2
  17. {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025050103}/keras_rs/src/metrics/ranking_metrics_utils.py +2 -2
  18. {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025050103}/keras_rs/src/metrics/recall_at_k.py +4 -3
  19. {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025050103}/keras_rs/src/utils/doc_string_utils.py +6 -1
  20. {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025050103}/keras_rs/src/version.py +1 -1
  21. {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025050103}/keras_rs_nightly.egg-info/PKG-INFO +2 -2
  22. {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025050103}/keras_rs/api/__init__.py +0 -0
  23. {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025050103}/keras_rs/api/layers/__init__.py +0 -0
  24. {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025050103}/keras_rs/api/losses/__init__.py +0 -0
  25. {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025050103}/keras_rs/api/metrics/__init__.py +0 -0
  26. {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025050103}/keras_rs/src/__init__.py +0 -0
  27. {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025050103}/keras_rs/src/api_export.py +0 -0
  28. {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025050103}/keras_rs/src/layers/__init__.py +0 -0
  29. {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025050103}/keras_rs/src/layers/feature_interaction/__init__.py +0 -0
  30. {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025050103}/keras_rs/src/layers/feature_interaction/dot_interaction.py +0 -0
  31. {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025050103}/keras_rs/src/layers/feature_interaction/feature_cross.py +0 -0
  32. {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025050103}/keras_rs/src/layers/retrieval/__init__.py +0 -0
  33. {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025050103}/keras_rs/src/layers/retrieval/brute_force_retrieval.py +0 -0
  34. {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025050103}/keras_rs/src/layers/retrieval/retrieval.py +0 -0
  35. {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025050103}/keras_rs/src/losses/__init__.py +0 -0
  36. {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025050103}/keras_rs/src/losses/pairwise_loss_utils.py +0 -0
  37. {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025050103}/keras_rs/src/metrics/__init__.py +0 -0
  38. {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025050103}/keras_rs/src/metrics/utils.py +0 -0
  39. {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025050103}/keras_rs/src/types.py +0 -0
  40. {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025050103}/keras_rs/src/utils/__init__.py +0 -0
  41. {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025050103}/keras_rs/src/utils/keras_utils.py +0 -0
  42. {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025050103}/keras_rs_nightly.egg-info/SOURCES.txt +0 -0
  43. {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025050103}/keras_rs_nightly.egg-info/dependency_links.txt +0 -0
  44. {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025050103}/keras_rs_nightly.egg-info/requires.txt +0 -0
  45. {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025050103}/keras_rs_nightly.egg-info/top_level.txt +0 -0
  46. {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025050103}/pyproject.toml +0 -0
  47. {keras_rs_nightly-0.0.1.dev2025042903 → keras_rs_nightly-0.0.1.dev2025050103}/setup.cfg +0 -0

PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: keras-rs-nightly
- Version: 0.0.1.dev2025042903
+ Version: 0.0.1.dev2025050103
  Summary: Multi-backend recommender systems with Keras 3.
  Author-email: Keras team <keras-users@googlegroups.com>
  License: Apache License 2.0

@@ -93,7 +93,7 @@ Call `model.fit()` on dummy data:
  batch_size = 2
  x = np.random.randint(0, vocabulary_size, size=(batch_size,))
  y = np.random.random(size=(batch_size,))
- model.fit(input_data, y=y)
+ model.fit(x, y=y)
  ```

  ### Use ranking losses and metrics

README.md

@@ -68,7 +68,7 @@ Call `model.fit()` on dummy data:
  batch_size = 2
  x = np.random.randint(0, vocabulary_size, size=(batch_size,))
  y = np.random.random(size=(batch_size,))
- model.fit(input_data, y=y)
+ model.fit(x, y=y)
  ```

  ### Use ranking losses and metrics
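
The fix above (mirrored in PKG-INFO, whose long description embeds the README) replaces the undefined `input_data` with the `x` defined two lines earlier. A self-contained sketch of the corrected snippet; the model here is a stand-in, since the actual README builds its own model before this point:

```python
# Minimal stand-in model so the corrected README snippet runs end to end.
# Only the `model.fit(x, y=y)` line is from the diff; the model and
# vocabulary_size are assumptions for illustration.
import keras
import numpy as np

vocabulary_size = 32
model = keras.Sequential(
    [
        keras.layers.Embedding(vocabulary_size, 8),
        keras.layers.Dense(1),
    ]
)
model.compile(loss="mse", optimizer="adam")

batch_size = 2
x = np.random.randint(0, vocabulary_size, size=(batch_size,))
y = np.random.random(size=(batch_size,))
model.fit(x, y=y)  # previously `model.fit(input_data, y=y)`: a NameError
```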

keras_rs/src/layers/retrieval/hard_negative_mining.py

@@ -57,8 +57,9 @@ class HardNegativeMining(keras.layers.Layer):
  Returns:
  A tuple containing two tensors with the last dimension of
  `num_candidates` replaced with `num_hard_negatives + 1`.
- - logits: `[..., num_hard_negatives + 1]` tensor of logits.
- - labels: `[..., num_hard_negatives + 1]` one-hot tensor of labels.
+
+ * logits: `[..., num_hard_negatives + 1]` tensor of logits.
+ * labels: `[..., num_hard_negatives + 1]` one-hot tensor of labels.
  """

  # Number of sampled logits, i.e, the number of hard negatives to be
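
To make the reworded return section concrete, a usage sketch; the export path `keras_rs.layers.HardNegativeMining` and the `(logits, labels)` call convention are assumptions based on the docstring above:

```python
# Sketch: the output's last dimension shrinks from `num_candidates` to
# `num_hard_negatives + 1` (the positive plus the hardest negatives).
import numpy as np
import keras_rs

batch_size, num_candidates = 4, 10
logits = np.random.random((batch_size, num_candidates)).astype("float32")
labels = np.eye(num_candidates, dtype="float32")[:batch_size]  # one-hot

layer = keras_rs.layers.HardNegativeMining(num_hard_negatives=3)
out_logits, out_labels = layer(logits, labels)
print(out_logits.shape, out_labels.shape)  # expected: (4, 4) and (4, 4)
```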

keras_rs/src/layers/retrieval/remove_accidental_hits.py

@@ -51,7 +51,7 @@ class RemoveAccidentalHits(keras.layers.Layer):
  `labels`.

  Returns:
- logits: The modified logits with the same shape as the input logits.
+ The modified logits with the same shape as the input logits.
  """
  # A more principled way is to implement
  # `softmax_cross_entropy_with_logits` with a input mask. Here we

keras_rs/src/layers/retrieval/sampling_probability_correction.py

@@ -50,7 +50,8 @@ class SamplingProbabilityCorrection(keras.layers.Layer):
  candidate_sampling_probability: The sampling probability with the
  same shape as `logits`.

- Returns: The corrected logits with the same shape as the input logits.
+ Returns:
+ The corrected logits with the same shape as the input logits.
  """
  return logits - ops.log(
  ops.clip(candidate_sampling_probability, self.epsilon, 1.0)
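
The correction itself is visible in the hunk's context lines; restated in NumPy (the `epsilon` default below is an assumption, the layer carries its own):

```python
# Logits are shifted down by the log of each candidate's sampling
# probability, so frequently sampled candidates are not favored in a
# sampled softmax. Mirrors `logits - log(clip(p, epsilon, 1.0))` above.
import numpy as np

def corrected_logits(logits, sampling_probability, epsilon=1e-6):
    p = np.clip(sampling_probability, epsilon, 1.0)
    return logits - np.log(p)

logits = np.array([2.0, 1.0, 0.5])
sampling_p = np.array([0.5, 0.1, 0.0])  # the 0.0 is clipped to epsilon
print(corrected_logits(logits, sampling_p))
```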

keras_rs/src/losses/pairwise_hinge_loss.py

@@ -14,13 +14,13 @@ class PairwiseHingeLoss(PairwiseLoss):

  formula = "loss = sum_{i} sum_{j} I(y_i > y_j) * max(0, 1 - (s_i - s_j))"
  explanation = """
- - `max(0, 1 - (s_i - s_j))` is the hinge loss, which penalizes cases where
- the score difference `s_i - s_j` is not sufficiently large when
- `y_i > y_j`.
+ - `max(0, 1 - (s_i - s_j))` is the hinge loss, which penalizes cases where
+ the score difference `s_i - s_j` is not sufficiently large when
+ `y_i > y_j`.
  """
  extra_args = ""
  example = """
- 1. With `compile()` API:
+ With `compile()` API:

  ```python
  model.compile(

@@ -29,23 +29,24 @@ example = """
  )
  ```

- 2. As a standalone function:
- 2.1. Unbatched inputs
+ As a standalone function with unbatched inputs:
+
  >>> y_true = np.array([1.0, 0.0, 1.0, 3.0, 2.0])
  >>> y_pred = np.array([1.0, 3.0, 2.0, 4.0, 0.8])
  >>> pairwise_hinge_loss = keras_rs.losses.PairwiseHingeLoss()
  >>> pairwise_hinge_loss(y_true=y_true, y_pred=y_pred)
  2.32000

- 2.2 Batched inputs
- 2.2.1 Using default 'auto'/'sum_over_batch_size' reduction.
+ With batched inputs using default 'auto'/'sum_over_batch_size' reduction:
+
  >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
  >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
  >>> pairwise_hinge_loss = keras_rs.losses.PairwiseHingeLoss()
  >>> pairwise_hinge_loss(y_true=y_true, y_pred=y_pred)
  0.75

- 2.2.2. With masked inputs (useful for ragged inputs)
+ With masked inputs (useful for ragged inputs):
+
  >>> y_true = {
  ... "labels": np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]]),
  ... "mask": np.array(

@@ -56,7 +57,8 @@ example = """
  >>> pairwise_hinge_loss(y_true=y_true, y_pred=y_pred)
  0.64999

- 2.2.3 With `sample_weight`
+ With `sample_weight`:
+
  >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
  >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
  >>> sample_weight = np.array(

@@ -68,7 +70,8 @@ example = """
  ... )
  1.02499

- 2.2.4 Using `'none'` reduction.
+ Using `'none'` reduction:
+
  >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
  >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
  >>> pairwise_hinge_loss = keras_rs.losses.PairwiseHingeLoss(
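
The unbatched doctest value (2.32000) follows directly from the documented formula; a NumPy check, assuming the default `'sum_over_batch_size'` reduction averages the pair sum over the 5 list elements:

```python
# Reproduces the 2.32000 doctest value by applying
# `sum_{i} sum_{j} I(y_i > y_j) * max(0, 1 - (s_i - s_j))` directly.
import numpy as np

y_true = np.array([1.0, 0.0, 1.0, 3.0, 2.0])
y_pred = np.array([1.0, 3.0, 2.0, 4.0, 0.8])

loss = 0.0
for i in range(len(y_true)):
    for j in range(len(y_true)):
        if y_true[i] > y_true[j]:  # the indicator I(y_i > y_j)
            loss += max(0.0, 1.0 - (y_pred[i] - y_pred[j]))

print(loss / len(y_true))  # 2.32
```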

keras_rs/src/losses/pairwise_logistic_loss.py

@@ -22,14 +22,14 @@ class PairwiseLogisticLoss(PairwiseLoss):

  formula = "loss = sum_{i} sum_{j} I(y_i > y_j) * log(1 + exp(-(s_i - s_j)))"
  explanation = """
- - `log(1 + exp(-(s_i - s_j)))` is the logistic loss, which penalizes
- cases where the score difference `s_i - s_j` is not sufficiently large
- when `y_i > y_j`. This function provides a smooth approximation of the
- ideal step function, making it suitable for gradient-based optimization.
+ - `log(1 + exp(-(s_i - s_j)))` is the logistic loss, which penalizes
+ cases where the score difference `s_i - s_j` is not sufficiently large
+ when `y_i > y_j`. This function provides a smooth approximation of the
+ ideal step function, making it suitable for gradient-based optimization.
  """
  extra_args = ""
  example = """
- 1. With `compile()` API:
+ With `compile()` API:

  ```python
  model.compile(

@@ -38,23 +38,24 @@ example = """
  )
  ```

- 2. As a standalone function:
- 2.1. Unbatched inputs
+ As a standalone function with unbatched inputs:
+
  >>> y_true = np.array([1.0, 0.0, 1.0, 3.0, 2.0])
  >>> y_pred = np.array([1.0, 3.0, 2.0, 4.0, 0.8])
  >>> pairwise_logistic_loss = keras_rs.losses.PairwiseLogisticLoss()
  >>> pairwise_logistic_loss(y_true=y_true, y_pred=y_pred)
  >>> 1.70708

- 2.2 Batched inputs
- 2.2.1 Using default 'auto'/'sum_over_batch_size' reduction.
+ With batched inputs using default 'auto'/'sum_over_batch_size' reduction:
+
  >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
  >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
  >>> pairwise_logistic_loss = keras_rs.losses.PairwiseLogisticLoss()
  >>> pairwise_logistic_loss(y_true=y_true, y_pred=y_pred)
  0.73936

- 2.2.2. With masked inputs (useful for ragged inputs)
+ With masked inputs (useful for ragged inputs):
+
  >>> y_true = {
  ... "labels": np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]]),
  ... "mask": np.array(

@@ -65,7 +66,8 @@ example = """
  >>> pairwise_logistic_loss(y_true=y_true, y_pred=y_pred)
  0.53751

- 2.2.3 With `sample_weight`
+ With `sample_weight`:
+
  >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
  >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
  >>> sample_weight = np.array(

@@ -77,7 +79,8 @@ example = """
  ... )
  >>> 0.80337

- 2.2.4 Using `'none'` reduction.
+ Using `'none'` reduction:
+
  >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
  >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
  >>> pairwise_logistic_loss = keras_rs.losses.PairwiseLogisticLoss(

keras_rs/src/losses/pairwise_loss.py

@@ -70,7 +70,8 @@ class PairwiseLoss(keras.losses.Loss, abc.ABC):
  y_true: types.Tensor,
  y_pred: types.Tensor,
  ) -> types.Tensor:
- """
+ """Compute the pairwise loss.
+
  Args:
  y_true: tensor or dict. Ground truth values. If tensor, of shape
  `(list_size)` for unbatched inputs or `(batch_size, list_size)`

@@ -83,6 +84,9 @@ class PairwiseLoss(keras.losses.Loss, abc.ABC):
  y_pred: tensor. The predicted values, of shape `(list_size)` for
  unbatched inputs or `(batch_size, list_size)` for batched
  inputs. Should be of the same shape as `y_true`.
+
+ Returns:
+ The loss.
  """
  mask = None
  if isinstance(y_true, dict):

@@ -134,11 +138,12 @@ pairwise_loss_subclass_doc_string = (
  ```

  where:
- - `y_i` and `y_j` are the true labels of items `i` and `j`, respectively.
- - `s_i` and `s_j` are the predicted scores of items `i` and `j`,
- respectively.
- - `I(y_i > y_j)` is an indicator function that equals 1 if `y_i > y_j`,
- and 0 otherwise.{explanation}
+
+ - `y_i` and `y_j` are the true labels of items `i` and `j`, respectively.
+ - `s_i` and `s_j` are the predicted scores of items `i` and `j`,
+ respectively.
+ - `I(y_i > y_j)` is an indicator function that equals 1 if `y_i > y_j`,
+ and 0 otherwise.{explanation}
  Args:{extra_args}
  reduction: Type of reduction to apply to the loss. In almost all cases
  this should be `"sum_over_batch_size"`. Supported options are
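
All four pairwise losses in this release share this template, differing only in the per-pair function applied to `s_i - s_j`. A vectorized sketch (not the library's implementation) that reproduces the unbatched doctest values for the hinge and logistic variants:

```python
# Shared pairwise template: build all score differences s_i - s_j, mask
# with the indicator I(y_i > y_j), apply a per-pair function, and average
# over the list size (assumed equivalent to 'sum_over_batch_size' here).
import numpy as np

def pairwise_loss(y_true, y_pred, pair_fn):
    diff = y_pred[:, None] - y_pred[None, :]         # s_i - s_j
    indicator = y_true[:, None] > y_true[None, :]    # I(y_i > y_j)
    return np.sum(pair_fn(diff) * indicator) / len(y_true)

y_true = np.array([1.0, 0.0, 1.0, 3.0, 2.0])
y_pred = np.array([1.0, 3.0, 2.0, 4.0, 0.8])

# Matches the doctests above: 2.32000 (hinge), 1.70708 (logistic).
print(pairwise_loss(y_true, y_pred, lambda d: np.maximum(0.0, 1.0 - d)))
print(pairwise_loss(y_true, y_pred, lambda d: np.log1p(np.exp(-d))))
```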

keras_rs/src/losses/pairwise_mean_squared_error.py

@@ -59,13 +59,13 @@ class PairwiseMeanSquaredError(PairwiseLoss):

  formula = "loss = sum_{i} sum_{j} I(y_i > y_j) * (s_i - s_j)^2"
  explanation = """
- - `(s_i - s_j)^2` is the squared difference between the predicted scores
- of items `i` and `j`, which penalizes discrepancies between the
- predicted order of items relative to their true order.
+ - `(s_i - s_j)^2` is the squared difference between the predicted scores
+ of items `i` and `j`, which penalizes discrepancies between the predicted
+ order of items relative to their true order.
  """
  extra_args = ""
  example = """
- 1. With `compile()` API:
+ With `compile()` API:

  ```python
  model.compile(

@@ -74,23 +74,24 @@ example = """
  )
  ```

- 2. As a standalone function:
- 2.1. Unbatched inputs
+ As a standalone function with unbatched inputs:
+
  >>> y_true = np.array([1.0, 0.0, 1.0, 3.0, 2.0])
  >>> y_pred = np.array([1.0, 3.0, 2.0, 4.0, 0.8])
  >>> pairwise_mse = keras_rs.losses.PairwiseMeanSquaredError()
  >>> pairwise_mse(y_true=y_true, y_pred=y_pred)
  >>> 19.10400

- 2.2 Batched inputs
- 2.2.1 Using default 'auto'/'sum_over_batch_size' reduction.
+ With batched inputs using default 'auto'/'sum_over_batch_size' reduction:
+
  >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
  >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
  >>> pairwise_mse = keras_rs.losses.PairwiseMeanSquaredError()
  >>> pairwise_mse(y_true=y_true, y_pred=y_pred)
  5.57999

- 2.2.2. With masked inputs (useful for ragged inputs)
+ With masked inputs (useful for ragged inputs):
+
  >>> y_true = {
  ... "labels": np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]]),
  ... "mask": np.array(

@@ -101,7 +102,8 @@ example = """
  >>> pairwise_mse(y_true=y_true, y_pred=y_pred)
  4.76000

- 2.2.3 With `sample_weight`
+ With `sample_weight`:
+
  >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
  >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
  >>> sample_weight = np.array(

@@ -113,7 +115,8 @@ example = """
  ... )
  11.0500

- 2.2.4 Using `'none'` reduction.
+ Using `'none'` reduction:
+
  >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
  >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
  >>> pairwise_mse = keras_rs.losses.PairwiseMeanSquaredError(

keras_rs/src/losses/pairwise_soft_zero_one_loss.py

@@ -18,14 +18,14 @@ class PairwiseSoftZeroOneLoss(PairwiseLoss):

  formula = "loss = sum_{i} sum_{j} I(y_i > y_j) * (1 - sigmoid(s_i - s_j))"
  explanation = """
- - `(1 - sigmoid(s_i - s_j))` represents the soft zero-one loss, which
- approximates the ideal zero-one loss (which would be 1 if `s_i < s_j`
- and 0 otherwise) with a smooth, differentiable function. This makes it
- suitable for gradient-based optimization.
+ - `(1 - sigmoid(s_i - s_j))` represents the soft zero-one loss, which
+ approximates the ideal zero-one loss (which would be 1 if `s_i < s_j`
+ and 0 otherwise) with a smooth, differentiable function. This makes it
+ suitable for gradient-based optimization.
  """
  extra_args = ""
  example = """
- 1. With `compile()` API:
+ With `compile()` API:

  ```python
  model.compile(

@@ -34,23 +34,24 @@ example = """
  )
  ```

- 2. As a standalone function:
- 2.1. Unbatched inputs
+ As a standalone function with unbatched inputs:
+
  >>> y_true = np.array([1.0, 0.0, 1.0, 3.0, 2.0])
  >>> y_pred = np.array([1.0, 3.0, 2.0, 4.0, 0.8])
  >>> pairwise_soft_zero_one_loss = keras_rs.losses.PairwiseSoftZeroOneLoss()
  >>> pairwise_soft_zero_one_loss(y_true=y_true, y_pred=y_pred)
  0.86103

- 2.2 Batched inputs
- 2.2.1 Using default 'auto'/'sum_over_batch_size' reduction.
+ With batched inputs using default 'auto'/'sum_over_batch_size' reduction:
+
  >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
  >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
  >>> pairwise_soft_zero_one_loss = keras_rs.losses.PairwiseSoftZeroOneLoss()
  >>> pairwise_soft_zero_one_loss(y_true=y_true, y_pred=y_pred)
  0.46202

- 2.2.2. With masked inputs (useful for ragged inputs)
+ With masked inputs (useful for ragged inputs):
+
  >>> y_true = {
  ... "labels": np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]]),
  ... "mask": np.array(

@@ -61,7 +62,8 @@ example = """
  >>> pairwise_soft_zero_one_loss(y_true=y_true, y_pred=y_pred)
  0.29468

- 2.2.3 With `sample_weight`
+ With `sample_weight`:
+
  >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
  >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
  >>> sample_weight = np.array(

@@ -73,7 +75,8 @@ example = """
  ... )
  0.40478

- 2.2.4 Using `'none'` reduction.
+ Using `'none'` reduction:
+
  >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
  >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
  >>> pairwise_soft_zero_one_loss = keras_rs.losses.PairwiseSoftZeroOneLoss(

keras_rs/src/metrics/dcg.py

@@ -111,16 +111,15 @@ DCG@k(y', w') = sum_{i=1}^{k} (gain_fn(y'_i) / rank_discount_fn(i))
  ```

  where:
- - `y'_i` is the true relevance score of the item ranked at position `i`
- (obtained by sorting `y_true` according to `y_pred`).
- - `gain_fn` is the user-provided function mapping relevance `y'_i` to a
- gain value. The default function (`default_gain_fn`) is typically
- equivalent to `lambda y: 2**y - 1`.
- - `rank_discount_fn` is the user-provided function mapping rank `i`
- to a discount value. The default function (`default_rank_discount_fn`)
- is typically equivalent to `lambda rank: 1 / log2(rank + 1)`.
- - The final result aggregates these per-list scores.
- """
+ - `y'_i` is the true relevance score of the item ranked at position `i`
+ (obtained by sorting `y_true` according to `y_pred`).
+ - `gain_fn` is the user-provided function mapping relevance `y'_i` to a
+ gain value. The default function (`default_gain_fn`) is typically
+ equivalent to `lambda y: 2**y - 1`.
+ - `rank_discount_fn` is the user-provided function mapping rank `i`
+ to a discount value. The default function (`default_rank_discount_fn`)
+ is typically equivalent to `lambda rank: 1 / log2(rank + 1)`.
+ - The final result aggregates these per-list scores."""
  extra_args = """
  gain_fn: callable. Maps relevance scores (`y_true`) to gain values. The
  default implements `2**y - 1`.

@@ -136,7 +135,8 @@ example = """
  ... y_true=labels, y_pred=scores
  ... )

- # Mask certain elements (can be used for uneven inputs)
+ Mask certain elements (can be used for uneven inputs):
+
  >>> batch_size = 2
  >>> list_size = 5
  >>> labels = np.random.randint(0, 3, size=(batch_size, list_size))
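
A worked single-list DCG with the default `gain_fn` and `rank_discount_fn` described above. This sketch multiplies gain by discount, matching the library's `compute_dcg` (`gain = gain_fn(y_true)`, `discount = rank_discount_fn(positions)`; see the ranking_metrics_utils.py hunk further down):

```python
# DCG sketch: sort by predicted score, apply gain 2**y - 1 and discount
# 1 / log2(rank + 1), then sum gain * discount over the list.
import numpy as np

y_true = np.array([3.0, 1.0, 2.0, 0.0])
y_pred = np.array([0.5, 2.0, 1.0, 0.1])

order = np.argsort(-y_pred)             # ranking induced by the scores
sorted_rel = y_true[order]              # y'_i in the formula
ranks = np.arange(1, len(y_true) + 1)   # 1-based positions

gain = 2.0**sorted_rel - 1.0            # default_gain_fn
discount = 1.0 / np.log2(ranks + 1.0)   # default_rank_discount_fn
print(np.sum(gain * discount))          # DCG over the full list (k = 4)
```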

keras_rs/src/metrics/mean_average_precision.py

@@ -25,7 +25,7 @@ class MeanAveragePrecision(RankingMetric):
  ) -> types.Tensor:
  relevance = ops.cast(
  ops.greater_equal(y_true, ops.cast(1, dtype=y_true.dtype)),
- dtype="float32",
+ dtype=y_pred.dtype,
  )
  sorted_relevance, sorted_weights = sort_by_scores(
  tensors_to_sort=[relevance, sample_weight],

@@ -82,22 +82,18 @@ rel(j) = y_i if rank(s_i) = j
  ```

  where:
- - `j` represents the rank position (starting from 1).
- - `sum_j` indicates a summation over all ranks `j` from 1 up to the list
- size (or `k`).
- - `P@j(y, s)` denotes the Precision at rank `j`, calculated as the
- number of relevant items found within the top `j` positions divided by
- `j`.
- - `rel(j)` represents the relevance of the item specifically at rank
- `j`. `rel(j)` is 1 if the item at rank `j` is relevant, and 0
- otherwise.
- - `y_i` is the true relevance label of the original item `i` before
- ranking.
- - `rank(s_i)` is the rank position assigned to item `i` based on its
- score `s_i`.
- - `sum_i y_i` calculates the total number of relevant items in the
- original list `y`.
- """
+ - `j` represents the rank position (starting from 1).
+ - `sum_j` indicates a summation over all ranks `j` from 1 up to the list
+ size (or `k`).
+ - `P@j(y, s)` denotes the Precision at rank `j`, calculated as the
+ number of relevant items found within the top `j` positions divided by `j`.
+ - `rel(j)` represents the relevance of the item specifically at rank
+ `j`. `rel(j)` is 1 if the item at rank `j` is relevant, and 0 otherwise.
+ - `y_i` is the true relevance label of the original item `i` before ranking.
+ - `rank(s_i)` is the rank position assigned to item `i` based on its score
+ `s_i`.
+ - `sum_i y_i` calculates the total number of relevant items in the original
+ list `y`."""
  extra_args = ""
  example = """
  >>> batch_size = 2

@@ -108,7 +104,8 @@ example = """
  ... y_true=labels, y_pred=scores
  ... )

- # Mask certain elements (can be used for uneven inputs)
+ Mask certain elements (can be used for uneven inputs):
+
  >>> batch_size = 2
  >>> list_size = 5
  >>> labels = np.random.randint(0, 2, size=(batch_size, list_size))
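
The reformatted `where:` list above fully specifies average precision for one list; a NumPy sketch (without the weighting and masking the metric also supports):

```python
# Average precision for a single list, following the formula above:
# AP = sum_j (P@j * rel(j)) / (number of relevant items).
import numpy as np

y_true = np.array([1.0, 0.0, 1.0, 0.0])   # binary relevance labels
y_pred = np.array([0.9, 0.8, 0.7, 0.1])   # predicted scores

rel = y_true[np.argsort(-y_pred)]          # rel(j) down the ranking
precision_at_j = np.cumsum(rel) / np.arange(1, len(rel) + 1)  # P@j
ap = np.sum(precision_at_j * rel) / np.sum(y_true)

print(ap)  # (1/1 + 2/3) / 2 = 0.8333...
```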

keras_rs/src/metrics/mean_reciprocal_rank.py

@@ -44,13 +44,13 @@ class MeanReciprocalRank(RankingMetric):
  ops.greater_equal(
  sorted_y_true, ops.cast(1, dtype=sorted_y_true.dtype)
  ),
- dtype="float32",
+ dtype=y_pred.dtype,
  )

  # `reciprocal_rank = [1, 0.5, 0.33]`
  reciprocal_rank = ops.divide(
- ops.cast(1, dtype="float32"),
- ops.arange(1, list_length + 1, dtype="float32"),
+ ops.cast(1, dtype=y_pred.dtype),
+ ops.arange(1, list_length + 1, dtype=y_pred.dtype),
  )

  # `mrr` should be of shape `(batch_size, 1)`.

@@ -64,7 +64,7 @@ class MeanReciprocalRank(RankingMetric):
  # Get weights.
  overall_relevance = ops.cast(
  ops.greater_equal(y_true, ops.cast(1, dtype=y_true.dtype)),
- dtype="float32",
+ dtype=y_pred.dtype,
  )
  per_list_weights = get_list_weights(
  weights=sample_weight, relevance=overall_relevance

@@ -95,7 +95,8 @@ example = """
  ... y_true=labels, y_pred=scores
  ... )

- # Mask certain elements (can be used for uneven inputs)
+ Mask certain elements (can be used for uneven inputs):
+
  >>> batch_size = 2
  >>> list_size = 5
  >>> labels = np.random.randint(0, 2, size=(batch_size, list_size))

keras_rs/src/metrics/ndcg.py

@@ -109,7 +109,7 @@ class NDCG(RankingMetric):

  concept_sentence = (
  "It normalizes the Discounted Cumulative Gain (DCG) with the Ideal "
- "Discounted Cumulative Gain (IDCG) for each list."
+ "Discounted Cumulative Gain (IDCG) for each list"
  )
  relevance_type = (
  "graded relevance scores (non-negative numbers where higher values "

@@ -124,11 +124,6 @@ score_range_interpretation = (
  )

  formula = """
- The metric calculates a weighted average nDCG score per list.
- For a single list, nDCG is computed as the ratio of the Discounted
- Cumulative Gain (DCG) of the predicted ranking to the Ideal Discounted
- Cumulative Gain (IDCG) of the best possible ranking:
-
  ```
  nDCG@k = DCG@k / IDCG@k
  ```

@@ -147,29 +142,25 @@ IDCG@k(y'') = sum_{i=1}^{k} (gain_fn(y''_i) / rank_discount_fn(i))
  ```

  where:
- - `y'_i`: True relevance of the item at rank `i` in
- the ranking induced by `y_pred`.
- - `y''_i` True relevance of the item at rank `i` in
- the *ideal* ranking (sorted by `y_true` descending).
- - `gain_fn` is the user-provided function mapping relevance to gain.
- The default function (`default_gain_fn`) is typically equivalent to
- `lambda y: 2**y - 1`.
- - `rank_discount_fn` is the user-provided function mapping rank `i`
- (1-based) to a discount value. The default function
- (`default_rank_discount_fn`) is typically equivalent to
- `lambda rank: 1 / log2(rank + 1)`.
- - If IDCG@k is 0 (e.g., no relevant items), nDCG@k is defined as 0.
- - The final result often aggregates these per-list nDCG scores,
- potentially involving normalization by list-specific weights, to
- produce a weighted average.
- """
+ - `y'_i`: True relevance of the item at rank `i` in the ranking induced by
+ `y_pred`.
+ - `y''_i` True relevance of the item at rank `i` in the *ideal* ranking (sorted
+ by `y_true` descending).
+ - `gain_fn` is the user-provided function mapping relevance to gain. The default
+ function (`default_gain_fn`) is typically equivalent to `lambda y: 2**y - 1`.
+ - `rank_discount_fn` is the user-provided function mapping rank `i` (1-based) to
+ a discount value. The default function (`default_rank_discount_fn`) is
+ typically equivalent to `lambda rank: 1 / log2(rank + 1)`.
+ - If IDCG@k is 0 (e.g., no relevant items), nDCG@k is defined as 0.
+ - The final result often aggregates these per-list nDCG scores, potentially
+ involving normalization by list-specific weights, to produce a weighted
+ average."""
  extra_args = """
- gain_fn: callable. Maps relevance scores (`y_true`) to gain values. The
- default implements `2**y - 1`. Used for both DCG and IDCG.
- rank_discount_fn: callable. Maps rank positions (1-based) to discount
- values. The default (`default_rank_discount_fn`) typically implements
- `1 / log2(rank + 1)`. Used for both DCG and IDCG.
- """
+ gain_fn: callable. Maps relevance scores (`y_true`) to gain values. The
+ default implements `2**y - 1`.
+ rank_discount_fn: function. Maps rank positions to discount
+ values. The default (`default_rank_discount_fn`) implements
+ `1 / log2(rank + 1)`."""
  example = """
  >>> batch_size = 2
  >>> list_size = 5

@@ -179,7 +170,8 @@ example = """
  ... y_true=labels, y_pred=scores
  ... )

- # Mask certain elements (can be used for uneven inputs)
+ Mask certain elements (can be used for uneven inputs):
+
  >>> batch_size = 2
  >>> list_size = 5
  >>> labels = np.random.randint(0, 3, size=(batch_size, list_size))
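
Putting the two formulas together: nDCG is the DCG of the predicted ranking divided by the DCG of the ideal ranking. A sketch with the default gain and discount, including the IDCG = 0 convention noted above (it extends the DCG snippet in the dcg.py section):

```python
# nDCG@k = DCG@k / IDCG@k, with IDCG computed on the ideal ordering
# (y_true sorted descending).
import numpy as np

def dcg(relevances):
    ranks = np.arange(1, len(relevances) + 1)
    return np.sum((2.0**relevances - 1.0) / np.log2(ranks + 1.0))

y_true = np.array([3.0, 1.0, 2.0, 0.0])
y_pred = np.array([0.5, 2.0, 1.0, 0.1])

dcg_val = dcg(y_true[np.argsort(-y_pred)])   # predicted ranking
idcg_val = dcg(np.sort(y_true)[::-1])        # ideal ranking
print(dcg_val / idcg_val if idcg_val > 0 else 0.0)  # 0 when IDCG is 0
```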

keras_rs/src/metrics/precision_at_k.py

@@ -40,7 +40,7 @@ class PrecisionAtK(RankingMetric):
  ops.greater_equal(
  sorted_y_true, ops.cast(1, dtype=sorted_y_true.dtype)
  ),
- dtype="float32",
+ dtype=y_pred.dtype,
  )
  list_length = ops.shape(sorted_y_true)[1]
  # TODO: We do not do this for MRR, and the other metrics. Do we need to

@@ -52,13 +52,13 @@ class PrecisionAtK(RankingMetric):

  per_list_precision = ops.divide_no_nan(
  ops.sum(relevance, axis=1, keepdims=True),
- ops.cast(valid_list_length, dtype="float32"),
+ ops.cast(valid_list_length, dtype=y_pred.dtype),
  )

  # Get weights.
  overall_relevance = ops.cast(
  ops.greater_equal(y_true, ops.cast(1, dtype=y_true.dtype)),
- dtype="float32",
+ dtype=y_pred.dtype,
  )
  per_list_weights = get_list_weights(
  weights=sample_weight, relevance=overall_relevance

@@ -91,7 +91,8 @@ example = """
  ... y_true=labels, y_pred=scores
  ... )

- # Mask certain elements (can be used for uneven inputs)
+ Mask certain elements (can be used for uneven inputs):
+
  >>> batch_size = 2
  >>> list_size = 5
  >>> labels = np.random.randint(0, 2, size=(batch_size, list_size))

keras_rs/src/metrics/ranking_metric.py

@@ -116,6 +116,12 @@ class RankingMetric(keras.metrics.Mean, abc.ABC):
  if passed_mask is not None:
  passed_mask = ops.convert_to_tensor(passed_mask)

+ # Cast to the correct dtype.
+ y_true = ops.cast(y_true, dtype=self.dtype)
+ y_pred = ops.cast(y_pred, dtype=self.dtype)
+ if sample_weight is not None:
+ sample_weight = ops.cast(sample_weight, dtype=self.dtype)
+
  # === Process `sample_weight` ===
  if sample_weight is None:
  sample_weight = ops.cast(1, dtype=y_pred.dtype)

@@ -152,7 +158,7 @@ class RankingMetric(keras.metrics.Mean, abc.ABC):

  # Mask all values less than 0 (since less than 0 implies invalid
  # labels).
- valid_mask = ops.greater_equal(y_true, ops.cast(0.0, y_true.dtype))
+ valid_mask = ops.greater_equal(y_true, ops.cast(0, y_true.dtype))
  if passed_mask is not None:
  valid_mask = ops.logical_and(valid_mask, passed_mask)

@@ -221,7 +227,6 @@ by sorting in descending order. {score_range_interpretation}.
  For each list of predicted scores `s` in `y_pred` and the corresponding list
  of true labels `y` in `y_true`, the per-query {metric_abbreviation} score is
  calculated as follows:
-
  {formula}

  The final {metric_abbreviation} score reported is typically the weighted

keras_rs/src/metrics/ranking_metrics_utils.py

@@ -163,7 +163,7 @@ def get_list_weights(
  # Identify lists where both weights and relevance sums are non-zero.
  nonzero_relevance = ops.cast(
  ops.logical_and(nonzero_weights, nonzero_relevance_condition),
- dtype="float32",
+ dtype=weights.dtype,
  )
  # Count the number of lists with non-zero relevance and non-zero weights.
  nonzero_relevance_count = ops.sum(nonzero_relevance, axis=0, keepdims=True)

@@ -227,7 +227,7 @@ def compute_dcg(
  ] = default_rank_discount_fn,
  ) -> types.Tensor:
  list_size = ops.shape(y_true)[1]
- positions = ops.arange(1, list_size + 1, dtype="float32")
+ positions = ops.arange(1, list_size + 1, dtype=y_true.dtype)
  gain = gain_fn(y_true)
  discount = rank_discount_fn(positions)

keras_rs/src/metrics/recall_at_k.py

@@ -38,11 +38,11 @@ class RecallAtK(RankingMetric):
  ops.greater_equal(
  sorted_y_true, ops.cast(1, dtype=sorted_y_true.dtype)
  ),
- dtype="float32",
+ dtype=y_pred.dtype,
  )
  overall_relevance = ops.cast(
  ops.greater_equal(y_true, ops.cast(1, dtype=y_true.dtype)),
- dtype="float32",
+ dtype=y_pred.dtype,
  )
  per_list_recall = ops.divide_no_nan(
  ops.sum(relevance, axis=1, keepdims=True),

@@ -82,7 +82,8 @@ example = """
  ... y_true=labels, y_pred=scores
  ... )

- # Mask certain elements (can be used for uneven inputs)
+ Mask certain elements (can be used for uneven inputs):
+
  >>> batch_size = 2
  >>> list_size = 5
  >>> labels = np.random.randint(0, 2, size=(batch_size, list_size))
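
Precision@k and recall@k use the relevance definition visible in these hunks (`y_true >= 1`). A single-list NumPy sketch; note it divides precision by `k` rather than the valid list length the library uses, and it omits weighting and masking:

```python
# Top-k items by predicted score; precision = relevant-in-top-k / k,
# recall = relevant-in-top-k / total relevant items.
import numpy as np

def precision_recall_at_k(y_true, y_pred, k):
    top_k = np.argsort(-y_pred)[:k]
    relevant_in_top_k = np.sum(y_true[top_k] >= 1)
    precision = relevant_in_top_k / k
    recall = relevant_in_top_k / np.sum(y_true >= 1)
    return precision, recall

y_true = np.array([1.0, 0.0, 1.0, 1.0, 0.0])
y_pred = np.array([0.9, 0.8, 0.1, 0.7, 0.3])
print(precision_recall_at_k(y_true, y_pred, k=2))  # (0.5, 0.333...)
```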

keras_rs/src/utils/doc_string_utils.py

@@ -30,8 +30,13 @@ def format_docstring(template: str, width: int = 80, **kwargs: Any) -> str:
  textwrap.indent(formula_dedented, base_indent_str)
  )
  elif "where:" in stripped_block:
+ # Expect this to be already indented.
+ splitted_block = stripped_block.split("\n")
  processed_output.append(
- textwrap.indent(stripped_block, base_indent_str)
+ textwrap.indent(
+ splitted_block[0] + "\n\n" + "\n".join(splitted_block[1:]),
+ base_indent_str,
+ )
  )
  else:
  processed_output.append(
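
What the new branch does to a `where:` block: it splits off the heading and inserts a blank line so the bullets render as a proper list. The transformation in isolation, outside the full `format_docstring` pipeline:

```python
# The core of the change: heading, blank line, then the bullet lines.
block = "where:\n- `y_i` is a label.\n- `s_i` is a score."

lines = block.split("\n")
print(lines[0] + "\n\n" + "\n".join(lines[1:]))
# where:
#
# - `y_i` is a label.
# - `s_i` is a score.
```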

keras_rs/src/version.py

@@ -1,7 +1,7 @@
  from keras_rs.src.api_export import keras_rs_export

  # Unique source of truth for the version number.
- __version__ = "0.0.1.dev2025042903"
+ __version__ = "0.0.1.dev2025050103"


  @keras_rs_export("keras_rs.version")

keras_rs_nightly.egg-info/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: keras-rs-nightly
- Version: 0.0.1.dev2025042903
+ Version: 0.0.1.dev2025050103
  Summary: Multi-backend recommender systems with Keras 3.
  Author-email: Keras team <keras-users@googlegroups.com>
  License: Apache License 2.0

@@ -93,7 +93,7 @@ Call `model.fit()` on dummy data:
  batch_size = 2
  x = np.random.randint(0, vocabulary_size, size=(batch_size,))
  y = np.random.random(size=(batch_size,))
- model.fit(input_data, y=y)
+ model.fit(x, y=y)
  ```

  ### Use ranking losses and metrics