keras-rs-nightly 0.0.1.dev2025042803.tar.gz → 0.0.1.dev2025043003.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of keras-rs-nightly might be problematic.

Files changed (50)
  1. {keras_rs_nightly-0.0.1.dev2025042803 → keras_rs_nightly-0.0.1.dev2025043003}/PKG-INFO +75 -1
  2. {keras_rs_nightly-0.0.1.dev2025042803 → keras_rs_nightly-0.0.1.dev2025043003}/README.md +74 -0
  3. {keras_rs_nightly-0.0.1.dev2025042803 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/layers/feature_interaction/dot_interaction.py +47 -0
  4. {keras_rs_nightly-0.0.1.dev2025042803 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/layers/feature_interaction/feature_cross.py +25 -6
  5. {keras_rs_nightly-0.0.1.dev2025042803 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/layers/retrieval/hard_negative_mining.py +25 -7
  6. {keras_rs_nightly-0.0.1.dev2025042803 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/layers/retrieval/remove_accidental_hits.py +18 -5
  7. {keras_rs_nightly-0.0.1.dev2025042803 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/layers/retrieval/sampling_probability_correction.py +18 -3
  8. keras_rs_nightly-0.0.1.dev2025043003/keras_rs/src/losses/pairwise_hinge_loss.py +90 -0
  9. keras_rs_nightly-0.0.1.dev2025043003/keras_rs/src/losses/pairwise_logistic_loss.py +99 -0
  10. {keras_rs_nightly-0.0.1.dev2025042803 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/losses/pairwise_loss.py +14 -7
  11. {keras_rs_nightly-0.0.1.dev2025042803 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/losses/pairwise_mean_squared_error.py +66 -3
  12. keras_rs_nightly-0.0.1.dev2025043003/keras_rs/src/losses/pairwise_soft_zero_one_loss.py +98 -0
  13. {keras_rs_nightly-0.0.1.dev2025042803 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/metrics/dcg.py +33 -12
  14. {keras_rs_nightly-0.0.1.dev2025042803 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/metrics/mean_average_precision.py +36 -18
  15. {keras_rs_nightly-0.0.1.dev2025042803 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/metrics/mean_reciprocal_rank.py +25 -2
  16. {keras_rs_nightly-0.0.1.dev2025042803 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/metrics/ndcg.py +42 -29
  17. {keras_rs_nightly-0.0.1.dev2025042803 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/metrics/precision_at_k.py +25 -2
  18. {keras_rs_nightly-0.0.1.dev2025042803 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/metrics/ranking_metric.py +4 -2
  19. {keras_rs_nightly-0.0.1.dev2025042803 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/metrics/recall_at_k.py +25 -2
  20. {keras_rs_nightly-0.0.1.dev2025042803 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/utils/doc_string_utils.py +6 -1
  21. {keras_rs_nightly-0.0.1.dev2025042803 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/version.py +1 -1
  22. {keras_rs_nightly-0.0.1.dev2025042803 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs_nightly.egg-info/PKG-INFO +75 -1
  23. keras_rs_nightly-0.0.1.dev2025042803/keras_rs/src/losses/pairwise_hinge_loss.py +0 -27
  24. keras_rs_nightly-0.0.1.dev2025042803/keras_rs/src/losses/pairwise_logistic_loss.py +0 -36
  25. keras_rs_nightly-0.0.1.dev2025042803/keras_rs/src/losses/pairwise_soft_zero_one_loss.py +0 -32
  26. {keras_rs_nightly-0.0.1.dev2025042803 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/api/__init__.py +0 -0
  27. {keras_rs_nightly-0.0.1.dev2025042803 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/api/layers/__init__.py +0 -0
  28. {keras_rs_nightly-0.0.1.dev2025042803 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/api/losses/__init__.py +0 -0
  29. {keras_rs_nightly-0.0.1.dev2025042803 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/api/metrics/__init__.py +0 -0
  30. {keras_rs_nightly-0.0.1.dev2025042803 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/__init__.py +0 -0
  31. {keras_rs_nightly-0.0.1.dev2025042803 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/api_export.py +0 -0
  32. {keras_rs_nightly-0.0.1.dev2025042803 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/layers/__init__.py +0 -0
  33. {keras_rs_nightly-0.0.1.dev2025042803 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/layers/feature_interaction/__init__.py +0 -0
  34. {keras_rs_nightly-0.0.1.dev2025042803 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/layers/retrieval/__init__.py +0 -0
  35. {keras_rs_nightly-0.0.1.dev2025042803 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/layers/retrieval/brute_force_retrieval.py +0 -0
  36. {keras_rs_nightly-0.0.1.dev2025042803 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/layers/retrieval/retrieval.py +0 -0
  37. {keras_rs_nightly-0.0.1.dev2025042803 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/losses/__init__.py +0 -0
  38. {keras_rs_nightly-0.0.1.dev2025042803 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/losses/pairwise_loss_utils.py +0 -0
  39. {keras_rs_nightly-0.0.1.dev2025042803 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/metrics/__init__.py +0 -0
  40. {keras_rs_nightly-0.0.1.dev2025042803 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/metrics/ranking_metrics_utils.py +0 -0
  41. {keras_rs_nightly-0.0.1.dev2025042803 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/metrics/utils.py +0 -0
  42. {keras_rs_nightly-0.0.1.dev2025042803 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/types.py +0 -0
  43. {keras_rs_nightly-0.0.1.dev2025042803 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/utils/__init__.py +0 -0
  44. {keras_rs_nightly-0.0.1.dev2025042803 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs/src/utils/keras_utils.py +0 -0
  45. {keras_rs_nightly-0.0.1.dev2025042803 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs_nightly.egg-info/SOURCES.txt +0 -0
  46. {keras_rs_nightly-0.0.1.dev2025042803 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs_nightly.egg-info/dependency_links.txt +0 -0
  47. {keras_rs_nightly-0.0.1.dev2025042803 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs_nightly.egg-info/requires.txt +0 -0
  48. {keras_rs_nightly-0.0.1.dev2025042803 → keras_rs_nightly-0.0.1.dev2025043003}/keras_rs_nightly.egg-info/top_level.txt +0 -0
  49. {keras_rs_nightly-0.0.1.dev2025042803 → keras_rs_nightly-0.0.1.dev2025043003}/pyproject.toml +0 -0
  50. {keras_rs_nightly-0.0.1.dev2025042803 → keras_rs_nightly-0.0.1.dev2025043003}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: keras-rs-nightly
-Version: 0.0.1.dev2025042803
+Version: 0.0.1.dev2025043003
 Summary: Multi-backend recommender systems with Keras 3.
 Author-email: Keras team <keras-users@googlegroups.com>
 License: Apache License 2.0
@@ -36,6 +36,80 @@ This library is an extension of the core Keras API; all high-level modules
 receive that same level of polish as core Keras. If you are familiar with Keras,
 congratulations! You already understand most of Keras Recommenders.
 
+## Quick Links
+
+- [Home page](https://keras.io/keras_rs)
+- [Examples](https://keras.io/keras_rs/examples)
+- [API documentation](https://keras.io/keras_rs/api)
+
+## Quickstart
+
+### Train your own cross network
+
+Choose a backend:
+
+```python
+import os
+os.environ["KERAS_BACKEND"] = "jax"  # Or "tensorflow" or "torch"!
+```
+
+Import KerasRS and other libraries:
+
+```python
+import keras
+import keras_rs
+import numpy as np
+```
+
+Define a simple model using the `FeatureCross` layer:
+
+```python
+vocabulary_size = 32
+embedding_dim = 6
+
+inputs = keras.Input(shape=(), name='indices', dtype="int32")
+x0 = keras.layers.Embedding(
+    input_dim=vocabulary_size,
+    output_dim=embedding_dim
+)(inputs)
+x1 = keras_rs.layers.FeatureCross()(x0, x0)
+x2 = keras_rs.layers.FeatureCross()(x0, x1)
+output = keras.layers.Dense(units=10)(x2)
+model = keras.Model(inputs, output)
+```
+
+Compile the model:
+
+```python
+model.compile(
+    loss=keras.losses.MeanSquaredError(),
+    optimizer=keras.optimizers.Adam(learning_rate=3e-4)
+)
+```
+
+Call `model.fit()` on dummy data:
+
+```python
+batch_size = 2
+x = np.random.randint(0, vocabulary_size, size=(batch_size,))
+y = np.random.random(size=(batch_size,))
+model.fit(x, y=y)
+```
+
+### Use ranking losses and metrics
+
+If your task is to rank items in a list, you can make use of the ranking losses
+and metrics which KerasRS provides. Below, we use the pairwise hinge loss and
+track the nDCG metric:
+
+```python
+model.compile(
+    loss=keras_rs.losses.PairwiseHingeLoss(),
+    metrics=[keras_rs.metrics.NDCG()],
+    optimizer=keras.optimizers.Adam(learning_rate=3e-4),
+)
+```
+
 ## Installation
 
 Keras Recommenders is available on PyPI as `keras-rs`:
@@ -11,6 +11,80 @@ This library is an extension of the core Keras API; all high-level modules
 receive that same level of polish as core Keras. If you are familiar with Keras,
 congratulations! You already understand most of Keras Recommenders.
 
+## Quick Links
+
+- [Home page](https://keras.io/keras_rs)
+- [Examples](https://keras.io/keras_rs/examples)
+- [API documentation](https://keras.io/keras_rs/api)
+
+## Quickstart
+
+### Train your own cross network
+
+Choose a backend:
+
+```python
+import os
+os.environ["KERAS_BACKEND"] = "jax"  # Or "tensorflow" or "torch"!
+```
+
+Import KerasRS and other libraries:
+
+```python
+import keras
+import keras_rs
+import numpy as np
+```
+
+Define a simple model using the `FeatureCross` layer:
+
+```python
+vocabulary_size = 32
+embedding_dim = 6
+
+inputs = keras.Input(shape=(), name='indices', dtype="int32")
+x0 = keras.layers.Embedding(
+    input_dim=vocabulary_size,
+    output_dim=embedding_dim
+)(inputs)
+x1 = keras_rs.layers.FeatureCross()(x0, x0)
+x2 = keras_rs.layers.FeatureCross()(x0, x1)
+output = keras.layers.Dense(units=10)(x2)
+model = keras.Model(inputs, output)
+```
+
+Compile the model:
+
+```python
+model.compile(
+    loss=keras.losses.MeanSquaredError(),
+    optimizer=keras.optimizers.Adam(learning_rate=3e-4)
+)
+```
+
+Call `model.fit()` on dummy data:
+
+```python
+batch_size = 2
+x = np.random.randint(0, vocabulary_size, size=(batch_size,))
+y = np.random.random(size=(batch_size,))
+model.fit(x, y=y)
+```
+
+### Use ranking losses and metrics
+
+If your task is to rank items in a list, you can make use of the ranking losses
+and metrics which KerasRS provides. Below, we use the pairwise hinge loss and
+track the nDCG metric:
+
+```python
+model.compile(
+    loss=keras_rs.losses.PairwiseHingeLoss(),
+    metrics=[keras_rs.metrics.NDCG()],
+    optimizer=keras.optimizers.Adam(learning_rate=3e-4),
+)
+```
+
 ## Installation
 
 Keras Recommenders is available on PyPI as `keras-rs`:
@@ -30,6 +30,53 @@ class DotInteraction(keras.layers.Layer):
             but is much slower.
         **kwargs: Args to pass to the base class.
 
+    Example:
+
+    ```python
+    # 1. Simple forward pass
+    batch_size = 2
+    embedding_dim = 32
+    feature1 = np.random.randn(batch_size, embedding_dim)
+    feature2 = np.random.randn(batch_size, embedding_dim)
+    feature3 = np.random.randn(batch_size, embedding_dim)
+    feature_interactions = keras_rs.layers.DotInteraction()(
+        [feature1, feature2, feature3]
+    )
+
+    # 2. After embedding layer in a model
+    vocabulary_size = 32
+    embedding_dim = 6
+
+    # Create a simple model containing the layer.
+    feature_input_1 = keras.Input(shape=(), name='indices_1', dtype="int32")
+    feature_input_2 = keras.Input(shape=(), name='indices_2', dtype="int32")
+    feature_input_3 = keras.Input(shape=(), name='indices_3', dtype="int32")
+    x1 = keras.layers.Embedding(
+        input_dim=vocabulary_size,
+        output_dim=embedding_dim
+    )(feature_input_1)
+    x2 = keras.layers.Embedding(
+        input_dim=vocabulary_size,
+        output_dim=embedding_dim
+    )(feature_input_2)
+    x3 = keras.layers.Embedding(
+        input_dim=vocabulary_size,
+        output_dim=embedding_dim
+    )(feature_input_3)
+    feature_interactions = keras_rs.layers.DotInteraction()([x1, x2, x3])
+    output = keras.layers.Dense(units=10)(feature_interactions)
+    model = keras.Model(
+        [feature_input_1, feature_input_2, feature_input_3], output
+    )
+
+    # Call the model on the inputs.
+    batch_size = 2
+    f1 = np.random.randint(0, vocabulary_size, size=(batch_size,))
+    f2 = np.random.randint(0, vocabulary_size, size=(batch_size,))
+    f3 = np.random.randint(0, vocabulary_size, size=(batch_size,))
+    outputs = model([f1, f2, f3])
+    ```
+
     References:
         - [M. Naumov et al.](https://arxiv.org/abs/1906.00091)
     """
@@ -57,13 +57,32 @@ class FeatureCross(keras.layers.Layer):
     Example:
 
     ```python
-    # after embedding layer in a functional model
-    input = keras.Input(shape=(), name='indices', dtype="int64")
-    x0 = keras.layers.Embedding(input_dim=32, output_dim=6)(x0)
-    x1 = FeatureCross()(x0, x0)
-    x2 = FeatureCross()(x0, x1)
+    # 1. Simple forward pass
+    batch_size = 2
+    embedding_dim = 32
+    feature1 = np.random.randn(batch_size, embedding_dim)
+    feature2 = np.random.randn(batch_size, embedding_dim)
+    crossed_features = keras_rs.layers.FeatureCross()(feature1, feature2)
+
+    # 2. After embedding layer in a model
+    vocabulary_size = 32
+    embedding_dim = 6
+
+    # Create a simple model containing the layer.
+    inputs = keras.Input(shape=(), name='indices', dtype="int32")
+    x0 = keras.layers.Embedding(
+        input_dim=vocabulary_size,
+        output_dim=embedding_dim
+    )(inputs)
+    x1 = keras_rs.layers.FeatureCross()(x0, x0)
+    x2 = keras_rs.layers.FeatureCross()(x0, x1)
     logits = keras.layers.Dense(units=10)(x2)
-    model = keras.Model(input, logits)
+    model = keras.Model(inputs, logits)
+
+    # Call the model on the inputs.
+    batch_size = 2
+    input_data = np.random.randint(0, vocabulary_size, size=(batch_size,))
+    outputs = model(input_data)
     ```
 
     References:
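For intuition about the cross operation itself: assuming the Deep & Cross Network (DCN) formulation this layer appears to follow, each cross computes an elementwise product of the base input with a learned projection of the current input, plus a residual connection. A rough numpy sketch under that assumption, with hypothetical weights (illustrative only, not the layer's code):

```python
import numpy as np

# Assumed DCN-style cross: x0 * (x @ W + b) + x. The layer's exact
# parameterization may differ; W and b here are hypothetical stand-ins.
batch_size, dim = 2, 6
rng = np.random.default_rng(0)

x0 = rng.normal(size=(batch_size, dim))  # output of the embedding layer
W = rng.normal(size=(dim, dim))
b = np.zeros(dim)

x1 = x0 * (x0 @ W + b) + x0  # first cross: FeatureCross()(x0, x0)
x2 = x0 * (x1 @ W + b) + x1  # second cross (a real layer has its own weights)
```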
@@ -12,11 +12,27 @@ MAX_FLOAT = ml_dtypes.finfo("float32").max / 100.0
 
 @keras_rs_export("keras_rs.layers.HardNegativeMining")
 class HardNegativeMining(keras.layers.Layer):
-    """Transforms logits and labels to return hard negatives.
+    """Filter logits and labels to return hard negatives.
+
+    The output will include logits and labels for the requested number of hard
+    negatives as well as the positive candidate.
 
     Args:
         num_hard_negatives: How many hard negatives to return.
         **kwargs: Args to pass to the base class.
+
+    Example:
+
+    ```python
+    # Create layer with the configured number of hard negatives to mine.
+    hard_negative_mining = keras_rs.layers.HardNegativeMining(
+        num_hard_negatives=10
+    )
+
+    # This will retrieve the top 10 negative candidates plus the positive
+    # candidate from `labels` for each row.
+    out_logits, out_labels = hard_negative_mining(in_logits, in_labels)
+    ```
     """
 
     def __init__(self, num_hard_negatives: int, **kwargs: Any) -> None:
@@ -33,15 +49,17 @@ class HardNegativeMining(keras.layers.Layer):
         negatives as well as the positive candidate.
 
         Args:
-            logits: logits tensor, typically `[batch_size, num_candidates]` but
-                can have more dimensions or be 1D as `[num_candidates]`.
-            labels: one-hot labels tensor, must be the same shape as `logits`.
+            logits: The logits tensor, typically `[batch_size, num_candidates]`
+                but can have more dimensions or be 1D as `[num_candidates]`.
+            labels: The one-hot labels tensor, must be the same shape as
+                `logits`.
 
         Returns:
-            tuple containing two tensors with the last dimension of
+            A tuple containing two tensors with the last dimension of
             `num_candidates` replaced with `num_hard_negatives + 1`.
-            - logits: `[..., num_hard_negatives + 1]` tensor of logits.
-            - labels: `[..., num_hard_negatives + 1]` one-hot tensor of labels.
+
+            * logits: `[..., num_hard_negatives + 1]` tensor of logits.
+            * labels: `[..., num_hard_negatives + 1]` one-hot tensor of labels.
         """
 
         # Number of sampled logits, i.e, the number of hard negatives to be
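The docstring example above leaves `in_logits` and `in_labels` undefined; the following numpy sketch shows the documented behavior end to end, keeping the positive candidate plus the top-scoring negatives. It illustrates the semantics only, not the layer's actual implementation:

```python
import numpy as np

# Conceptual sketch of the documented behavior, not the layer's code:
# keep the positive candidate plus the highest-scoring negatives.
num_hard_negatives = 2
in_logits = np.array([[0.2, 3.1, -0.5, 2.4, 1.0]])
in_labels = np.array([[0.0, 0.0, 0.0, 1.0, 0.0]])  # one-hot positives

# Push positives to the bottom when ranking negatives.
masked = in_logits - in_labels * 1e9
neg_idx = np.argsort(-masked, axis=-1)[:, :num_hard_negatives]
pos_idx = np.argmax(in_labels, axis=-1, keepdims=True)
keep = np.concatenate([pos_idx, neg_idx], axis=-1)

out_logits = np.take_along_axis(in_logits, keep, axis=-1)  # [[2.4, 3.1, 1.0]]
out_labels = np.take_along_axis(in_labels, keep, axis=-1)  # [[1.0, 0.0, 0.0]]
# Last dimension is num_hard_negatives + 1, as documented.
```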
@@ -15,6 +15,18 @@ class RemoveAccidentalHits(keras.layers.Layer):
 
     Zeroes the logits of negative candidates that have the same ID as the
     positive candidate in that row.
+
+    Example:
+
+    ```python
+    # Create the layer.
+    remove_accidental_hits = keras_rs.layers.RemoveAccidentalHits()
+
+    # This will zero the logits of negative candidates that have the same ID as
+    # the positive candidate from `labels` so as to not negatively impact the
+    # true positive.
+    logits = remove_accidental_hits(logits, labels, candidate_ids)
+    ```
     """
 
     def call(
@@ -29,16 +41,17 @@ class RemoveAccidentalHits(keras.layers.Layer):
         have the same ID as the positive candidate in that row.
 
         Args:
-            logits: logits tensor, typically `[batch_size, num_candidates]` but
-                can have more dimensions or be 1D as `[num_candidates]`.
-            labels: one-hot labels tensor, must be the same shape as `logits`.
-            candidate_ids: candidate identifiers tensor, can be
+            logits: The logits tensor, typically `[batch_size, num_candidates]`
+                but can have more dimensions or be 1D as `[num_candidates]`.
+            labels: The one-hot labels tensor, must be the same shape as
+                `logits`.
+            candidate_ids: The candidate identifiers tensor, can be
                 `[num_candidates]` or `[batch_size, num_candidates]` or have
                 more dimensions as long as they match the last dimensions of
                 `labels`.
 
         Returns:
-            logits: Modified logits with the same shape as the input logits.
+            The modified logits with the same shape as the input logits.
         """
         # A more principled way is to implement
         # `softmax_cross_entropy_with_logits` with a input mask. Here we
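For intuition, here is a small numpy sketch of the accidental-hit idea: find negatives whose candidate ID collides with the row's positive, then mask them out. It is conceptual only; the docstring describes the effect as zeroing, and the exact mask value used by the layer is an implementation detail (large-negative masking shown here is one common variant):

```python
import numpy as np

# Conceptual sketch, not the layer's code: a negative candidate whose ID
# equals the row's positive ID is an "accidental hit".
logits = np.array([[2.0, 1.5, 0.3, 1.5]])
labels = np.array([[0.0, 1.0, 0.0, 0.0]])  # one-hot positive per row
candidate_ids = np.array([7, 3, 9, 3])     # IDs shared across the batch

positive_ids = candidate_ids[np.argmax(labels, axis=-1)]  # [3]
accidental = (candidate_ids[None, :] == positive_ids[:, None]) & (labels == 0.0)

# Neutralize accidental hits so they cannot act as negatives.
out = np.where(accidental, np.finfo(np.float32).min / 100.0, logits)
```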
@@ -17,6 +17,18 @@ class SamplingProbabilityCorrection(keras.layers.Layer):
         epsilon: float. Small float added to sampling probability to avoid
             taking the log of zero. Defaults to 1e-6.
         **kwargs: Args to pass to the base class.
+
+    Example:
+
+    ```python
+    # Create the layer.
+    sampling_probability_correction = (
+        keras_rs.layers.SamplingProbabilityCorrection()
+    )
+
+    # Correct the logits based on the provided candidate sampling probability.
+    logits = sampling_probability_correction(logits, probabilities)
+    ```
     """
 
     def __init__(self, epsilon: float = 1e-6, **kwargs: Any) -> None:
@@ -32,11 +44,14 @@ class SamplingProbabilityCorrection(keras.layers.Layer):
         """Corrects input logits to account for candidate sampling probability.
 
         Args:
-            logits: The logits to correct.
-            candidate_sampling_probability: The sampling probability.
+            logits: The logits tensor to correct, typically
+                `[batch_size, num_candidates]` but can have more dimensions or
+                be 1D as `[num_candidates]`.
+            candidate_sampling_probability: The sampling probability with the
+                same shape as `logits`.
 
         Returns:
-            The corrected logits.
+            The corrected logits with the same shape as the input logits.
         """
         return logits - ops.log(
             ops.clip(candidate_sampling_probability, self.epsilon, 1.0)
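The whole correction is the one-liner visible above: subtract the log of the (clipped) sampling probability. A standalone numpy illustration of its effect, showing that rarely sampled candidates receive the largest boost:

```python
import numpy as np

# Mirrors the return expression above: logits - log(clip(p, epsilon, 1.0)).
epsilon = 1e-6
logits = np.array([0.5, 0.5, 0.5])
sampling_prob = np.array([0.5, 0.01, 0.0])  # 0.0 is clipped to epsilon

corrected = logits - np.log(np.clip(sampling_prob, epsilon, 1.0))
print(corrected)  # [ 1.19314718  5.10517019 14.31551056]
```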
@@ -0,0 +1,90 @@
+from keras import ops
+
+from keras_rs.src import types
+from keras_rs.src.api_export import keras_rs_export
+from keras_rs.src.losses.pairwise_loss import PairwiseLoss
+from keras_rs.src.losses.pairwise_loss import pairwise_loss_subclass_doc_string
+
+
+@keras_rs_export("keras_rs.losses.PairwiseHingeLoss")
+class PairwiseHingeLoss(PairwiseLoss):
+    def pairwise_loss(self, pairwise_logits: types.Tensor) -> types.Tensor:
+        return ops.relu(ops.subtract(ops.array(1), pairwise_logits))
+
+
+formula = "loss = sum_{i} sum_{j} I(y_i > y_j) * max(0, 1 - (s_i - s_j))"
+explanation = """
+    - `max(0, 1 - (s_i - s_j))` is the hinge loss, which penalizes cases where
+      the score difference `s_i - s_j` is not sufficiently large when
+      `y_i > y_j`.
+"""
+extra_args = ""
+example = """
+    With `compile()` API:
+
+    ```python
+    model.compile(
+        loss=keras_rs.losses.PairwiseHingeLoss(),
+        ...
+    )
+    ```
+
+    As a standalone function with unbatched inputs:
+
+    >>> y_true = np.array([1.0, 0.0, 1.0, 3.0, 2.0])
+    >>> y_pred = np.array([1.0, 3.0, 2.0, 4.0, 0.8])
+    >>> pairwise_hinge_loss = keras_rs.losses.PairwiseHingeLoss()
+    >>> pairwise_hinge_loss(y_true=y_true, y_pred=y_pred)
+    2.32000
+
+    With batched inputs using default 'auto'/'sum_over_batch_size' reduction:
+
+    >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
+    >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+    >>> pairwise_hinge_loss = keras_rs.losses.PairwiseHingeLoss()
+    >>> pairwise_hinge_loss(y_true=y_true, y_pred=y_pred)
+    0.75
+
+    With masked inputs (useful for ragged inputs):
+
+    >>> y_true = {
+    ...     "labels": np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]]),
+    ...     "mask": np.array(
+    ...         [[True, True, True, True], [True, True, False, False]]
+    ...     ),
+    ... }
+    >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+    >>> pairwise_hinge_loss(y_true=y_true, y_pred=y_pred)
+    0.64999
+
+    With `sample_weight`:
+
+    >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
+    >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+    >>> sample_weight = np.array(
+    ...     [[2.0, 3.0, 1.0, 1.0], [2.0, 1.0, 0.0, 0.0]]
+    ... )
+    >>> pairwise_hinge_loss = keras_rs.losses.PairwiseHingeLoss()
+    >>> pairwise_hinge_loss(
+    ...     y_true=y_true, y_pred=y_pred, sample_weight=sample_weight
+    ... )
+    1.02499
+
+    Using `'none'` reduction:
+
+    >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
+    >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+    >>> pairwise_hinge_loss = keras_rs.losses.PairwiseHingeLoss(
+    ...     reduction="none"
+    ... )
+    >>> pairwise_hinge_loss(y_true=y_true, y_pred=y_pred)
+    [[3. , 0. , 2. , 0.], [0., 0.20000005, 0.79999995, 0.]]
+"""
+
+PairwiseHingeLoss.__doc__ = pairwise_loss_subclass_doc_string.format(
+    loss_name="hinge loss",
+    formula=formula,
+    explanation=explanation,
+    extra_args=extra_args,
+    example=example,
+)
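As a sanity check on the formula above, the unbatched doctest value (2.32) can be reproduced with plain numpy. This is a from-scratch sketch of the documented math, not the library's implementation:

```python
import numpy as np

y_true = np.array([1.0, 0.0, 1.0, 3.0, 2.0])
y_pred = np.array([1.0, 3.0, 2.0, 4.0, 0.8])

# pairwise_logits[i, j] = s_i - s_j; valid_pair[i, j] = I(y_i > y_j).
pairwise_logits = y_pred[:, None] - y_pred[None, :]
valid_pair = (y_true[:, None] - y_true[None, :]) > 0

# Hinge term per valid pair, summed, then averaged over the 5 list items
# ("sum_over_batch_size" on an unbatched list).
loss = np.sum(np.maximum(0.0, 1.0 - pairwise_logits) * valid_pair) / y_true.size
print(loss)  # 2.32, matching the doctest
```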
@@ -0,0 +1,99 @@
+from keras import ops
+
+from keras_rs.src import types
+from keras_rs.src.api_export import keras_rs_export
+from keras_rs.src.losses.pairwise_loss import PairwiseLoss
+from keras_rs.src.losses.pairwise_loss import pairwise_loss_subclass_doc_string
+
+
+@keras_rs_export("keras_rs.losses.PairwiseLogisticLoss")
+class PairwiseLogisticLoss(PairwiseLoss):
+    def pairwise_loss(self, pairwise_logits: types.Tensor) -> types.Tensor:
+        return ops.add(
+            ops.relu(ops.negative(pairwise_logits)),
+            ops.log(
+                ops.add(
+                    ops.array(1),
+                    ops.exp(ops.negative(ops.abs(pairwise_logits))),
+                )
+            ),
+        )
+
+
+formula = "loss = sum_{i} sum_{j} I(y_i > y_j) * log(1 + exp(-(s_i - s_j)))"
+explanation = """
+    - `log(1 + exp(-(s_i - s_j)))` is the logistic loss, which penalizes
+      cases where the score difference `s_i - s_j` is not sufficiently large
+      when `y_i > y_j`. This function provides a smooth approximation of the
+      ideal step function, making it suitable for gradient-based optimization.
+"""
+extra_args = ""
+example = """
+    With `compile()` API:
+
+    ```python
+    model.compile(
+        loss=keras_rs.losses.PairwiseLogisticLoss(),
+        ...
+    )
+    ```
+
+    As a standalone function with unbatched inputs:
+
+    >>> y_true = np.array([1.0, 0.0, 1.0, 3.0, 2.0])
+    >>> y_pred = np.array([1.0, 3.0, 2.0, 4.0, 0.8])
+    >>> pairwise_logistic_loss = keras_rs.losses.PairwiseLogisticLoss()
+    >>> pairwise_logistic_loss(y_true=y_true, y_pred=y_pred)
+    1.70708
+
+    With batched inputs using default 'auto'/'sum_over_batch_size' reduction:
+
+    >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
+    >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+    >>> pairwise_logistic_loss = keras_rs.losses.PairwiseLogisticLoss()
+    >>> pairwise_logistic_loss(y_true=y_true, y_pred=y_pred)
+    0.73936
+
+    With masked inputs (useful for ragged inputs):
+
+    >>> y_true = {
+    ...     "labels": np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]]),
+    ...     "mask": np.array(
+    ...         [[True, True, True, True], [True, True, False, False]]
+    ...     ),
+    ... }
+    >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+    >>> pairwise_logistic_loss(y_true=y_true, y_pred=y_pred)
+    0.53751
+
+    With `sample_weight`:
+
+    >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
+    >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+    >>> sample_weight = np.array(
+    ...     [[2.0, 3.0, 1.0, 1.0], [2.0, 1.0, 0.0, 0.0]]
+    ... )
+    >>> pairwise_logistic_loss = keras_rs.losses.PairwiseLogisticLoss()
+    >>> pairwise_logistic_loss(
+    ...     y_true=y_true, y_pred=y_pred, sample_weight=sample_weight
+    ... )
+    0.80337
+
+    Using `'none'` reduction:
+
+    >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
+    >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+    >>> pairwise_logistic_loss = keras_rs.losses.PairwiseLogisticLoss(
+    ...     reduction="none"
+    ... )
+    >>> pairwise_logistic_loss(y_true=y_true, y_pred=y_pred)
+    [[2.126928, 0., 1.3132616, 0.48877698], [0., 0.20000005, 0.79999995, 0.]]
+"""
+
+PairwiseLogisticLoss.__doc__ = pairwise_loss_subclass_doc_string.format(
+    loss_name="logistic loss",
+    formula=formula,
+    explanation=explanation,
+    extra_args=extra_args,
+    example=example,
+)
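One detail worth noting in the new file: `pairwise_loss` computes `relu(-x) + log(1 + exp(-|x|))` rather than the literal `log(1 + exp(-x))` from the formula string. The two are algebraically identical, and the rewritten form avoids overflow when `x` is very negative. A quick numpy check of that equivalence:

```python
import numpy as np

def naive(x):
    # Literal form: overflows for very negative x (numpy warns, returns inf).
    return np.log(1.0 + np.exp(-x))

def stable(x):
    # Rewritten form used by the implementation above.
    return np.maximum(-x, 0.0) + np.log1p(np.exp(-np.abs(x)))

x = np.array([-100.0, -1.0, 0.0, 1.0], dtype=np.float32)
print(naive(x)[0], stable(x)[0])  # inf 100.0 -- same function, no overflow
np.testing.assert_allclose(naive(x)[1:], stable(x)[1:], rtol=1e-6)
```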
@@ -70,7 +70,8 @@ class PairwiseLoss(keras.losses.Loss, abc.ABC):
         y_true: types.Tensor,
         y_pred: types.Tensor,
     ) -> types.Tensor:
-        """
+        """Compute the pairwise loss.
+
         Args:
             y_true: tensor or dict. Ground truth values. If tensor, of shape
                 `(list_size)` for unbatched inputs or `(batch_size, list_size)`
@@ -83,6 +84,9 @@ class PairwiseLoss(keras.losses.Loss, abc.ABC):
             y_pred: tensor. The predicted values, of shape `(list_size)` for
                 unbatched inputs or `(batch_size, list_size)` for batched
                 inputs. Should be of the same shape as `y_true`.
+
+        Returns:
+            The loss.
         """
         mask = None
         if isinstance(y_true, dict):
@@ -134,11 +138,12 @@ pairwise_loss_subclass_doc_string = (
     ```
 
     where:
-      - `y_i` and `y_j` are the true labels of items `i` and `j`, respectively.
-      - `s_i` and `s_j` are the predicted scores of items `i` and `j`,
-        respectively.
-      - `I(y_i > y_j)` is an indicator function that equals 1 if `y_i > y_j`,
-        and 0 otherwise.{explanation}
+
+    - `y_i` and `y_j` are the true labels of items `i` and `j`, respectively.
+    - `s_i` and `s_j` are the predicted scores of items `i` and `j`,
+      respectively.
+    - `I(y_i > y_j)` is an indicator function that equals 1 if `y_i > y_j`,
+      and 0 otherwise.{explanation}
     Args:{extra_args}
         reduction: Type of reduction to apply to the loss. In almost all cases
             this should be `"sum_over_batch_size"`. Supported options are
@@ -154,5 +159,7 @@ pairwise_loss_subclass_doc_string = (
             `"float32"` unless set to different value
             (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
             provided, then the `compute_dtype` will be utilized.
-    """
+
+    Examples:
+    {example}"""
 )