keras-rs-nightly 0.0.1.dev2025042703.tar.gz → 0.0.1.dev2025042903.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of keras-rs-nightly has been flagged as possibly problematic.
Files changed (50)
  1. {keras_rs_nightly-0.0.1.dev2025042703 → keras_rs_nightly-0.0.1.dev2025042903}/PKG-INFO +75 -1
  2. {keras_rs_nightly-0.0.1.dev2025042703 → keras_rs_nightly-0.0.1.dev2025042903}/README.md +74 -0
  3. {keras_rs_nightly-0.0.1.dev2025042703 → keras_rs_nightly-0.0.1.dev2025042903}/keras_rs/src/layers/feature_interaction/dot_interaction.py +47 -0
  4. {keras_rs_nightly-0.0.1.dev2025042703 → keras_rs_nightly-0.0.1.dev2025042903}/keras_rs/src/layers/feature_interaction/feature_cross.py +25 -6
  5. {keras_rs_nightly-0.0.1.dev2025042703 → keras_rs_nightly-0.0.1.dev2025042903}/keras_rs/src/layers/retrieval/hard_negative_mining.py +22 -5
  6. {keras_rs_nightly-0.0.1.dev2025042703 → keras_rs_nightly-0.0.1.dev2025042903}/keras_rs/src/layers/retrieval/remove_accidental_hits.py +18 -5
  7. {keras_rs_nightly-0.0.1.dev2025042703 → keras_rs_nightly-0.0.1.dev2025042903}/keras_rs/src/layers/retrieval/sampling_probability_correction.py +18 -4
  8. keras_rs_nightly-0.0.1.dev2025042903/keras_rs/src/losses/pairwise_hinge_loss.py +87 -0
  9. keras_rs_nightly-0.0.1.dev2025042903/keras_rs/src/losses/pairwise_logistic_loss.py +96 -0
  10. {keras_rs_nightly-0.0.1.dev2025042703 → keras_rs_nightly-0.0.1.dev2025042903}/keras_rs/src/losses/pairwise_loss.py +3 -1
  11. {keras_rs_nightly-0.0.1.dev2025042703 → keras_rs_nightly-0.0.1.dev2025042903}/keras_rs/src/losses/pairwise_mean_squared_error.py +60 -0
  12. keras_rs_nightly-0.0.1.dev2025042903/keras_rs/src/losses/pairwise_soft_zero_one_loss.py +95 -0
  13. {keras_rs_nightly-0.0.1.dev2025042703 → keras_rs_nightly-0.0.1.dev2025042903}/keras_rs/src/metrics/dcg.py +23 -2
  14. {keras_rs_nightly-0.0.1.dev2025042703 → keras_rs_nightly-0.0.1.dev2025042903}/keras_rs/src/metrics/mean_average_precision.py +23 -2
  15. {keras_rs_nightly-0.0.1.dev2025042703 → keras_rs_nightly-0.0.1.dev2025042903}/keras_rs/src/metrics/mean_reciprocal_rank.py +24 -2
  16. {keras_rs_nightly-0.0.1.dev2025042703 → keras_rs_nightly-0.0.1.dev2025042903}/keras_rs/src/metrics/ndcg.py +23 -2
  17. {keras_rs_nightly-0.0.1.dev2025042703 → keras_rs_nightly-0.0.1.dev2025042903}/keras_rs/src/metrics/precision_at_k.py +24 -2
  18. {keras_rs_nightly-0.0.1.dev2025042703 → keras_rs_nightly-0.0.1.dev2025042903}/keras_rs/src/metrics/ranking_metric.py +4 -1
  19. {keras_rs_nightly-0.0.1.dev2025042703 → keras_rs_nightly-0.0.1.dev2025042903}/keras_rs/src/metrics/recall_at_k.py +24 -2
  20. {keras_rs_nightly-0.0.1.dev2025042703 → keras_rs_nightly-0.0.1.dev2025042903}/keras_rs/src/version.py +1 -1
  21. {keras_rs_nightly-0.0.1.dev2025042703 → keras_rs_nightly-0.0.1.dev2025042903}/keras_rs_nightly.egg-info/PKG-INFO +75 -1
  22. keras_rs_nightly-0.0.1.dev2025042703/keras_rs/src/losses/pairwise_hinge_loss.py +0 -27
  23. keras_rs_nightly-0.0.1.dev2025042703/keras_rs/src/losses/pairwise_logistic_loss.py +0 -36
  24. keras_rs_nightly-0.0.1.dev2025042703/keras_rs/src/losses/pairwise_soft_zero_one_loss.py +0 -32
  25. {keras_rs_nightly-0.0.1.dev2025042703 → keras_rs_nightly-0.0.1.dev2025042903}/keras_rs/api/__init__.py +0 -0
  26. {keras_rs_nightly-0.0.1.dev2025042703 → keras_rs_nightly-0.0.1.dev2025042903}/keras_rs/api/layers/__init__.py +0 -0
  27. {keras_rs_nightly-0.0.1.dev2025042703 → keras_rs_nightly-0.0.1.dev2025042903}/keras_rs/api/losses/__init__.py +0 -0
  28. {keras_rs_nightly-0.0.1.dev2025042703 → keras_rs_nightly-0.0.1.dev2025042903}/keras_rs/api/metrics/__init__.py +0 -0
  29. {keras_rs_nightly-0.0.1.dev2025042703 → keras_rs_nightly-0.0.1.dev2025042903}/keras_rs/src/__init__.py +0 -0
  30. {keras_rs_nightly-0.0.1.dev2025042703 → keras_rs_nightly-0.0.1.dev2025042903}/keras_rs/src/api_export.py +0 -0
  31. {keras_rs_nightly-0.0.1.dev2025042703 → keras_rs_nightly-0.0.1.dev2025042903}/keras_rs/src/layers/__init__.py +0 -0
  32. {keras_rs_nightly-0.0.1.dev2025042703 → keras_rs_nightly-0.0.1.dev2025042903}/keras_rs/src/layers/feature_interaction/__init__.py +0 -0
  33. {keras_rs_nightly-0.0.1.dev2025042703 → keras_rs_nightly-0.0.1.dev2025042903}/keras_rs/src/layers/retrieval/__init__.py +0 -0
  34. {keras_rs_nightly-0.0.1.dev2025042703 → keras_rs_nightly-0.0.1.dev2025042903}/keras_rs/src/layers/retrieval/brute_force_retrieval.py +0 -0
  35. {keras_rs_nightly-0.0.1.dev2025042703 → keras_rs_nightly-0.0.1.dev2025042903}/keras_rs/src/layers/retrieval/retrieval.py +0 -0
  36. {keras_rs_nightly-0.0.1.dev2025042703 → keras_rs_nightly-0.0.1.dev2025042903}/keras_rs/src/losses/__init__.py +0 -0
  37. {keras_rs_nightly-0.0.1.dev2025042703 → keras_rs_nightly-0.0.1.dev2025042903}/keras_rs/src/losses/pairwise_loss_utils.py +0 -0
  38. {keras_rs_nightly-0.0.1.dev2025042703 → keras_rs_nightly-0.0.1.dev2025042903}/keras_rs/src/metrics/__init__.py +0 -0
  39. {keras_rs_nightly-0.0.1.dev2025042703 → keras_rs_nightly-0.0.1.dev2025042903}/keras_rs/src/metrics/ranking_metrics_utils.py +0 -0
  40. {keras_rs_nightly-0.0.1.dev2025042703 → keras_rs_nightly-0.0.1.dev2025042903}/keras_rs/src/metrics/utils.py +0 -0
  41. {keras_rs_nightly-0.0.1.dev2025042703 → keras_rs_nightly-0.0.1.dev2025042903}/keras_rs/src/types.py +0 -0
  42. {keras_rs_nightly-0.0.1.dev2025042703 → keras_rs_nightly-0.0.1.dev2025042903}/keras_rs/src/utils/__init__.py +0 -0
  43. {keras_rs_nightly-0.0.1.dev2025042703 → keras_rs_nightly-0.0.1.dev2025042903}/keras_rs/src/utils/doc_string_utils.py +0 -0
  44. {keras_rs_nightly-0.0.1.dev2025042703 → keras_rs_nightly-0.0.1.dev2025042903}/keras_rs/src/utils/keras_utils.py +0 -0
  45. {keras_rs_nightly-0.0.1.dev2025042703 → keras_rs_nightly-0.0.1.dev2025042903}/keras_rs_nightly.egg-info/SOURCES.txt +0 -0
  46. {keras_rs_nightly-0.0.1.dev2025042703 → keras_rs_nightly-0.0.1.dev2025042903}/keras_rs_nightly.egg-info/dependency_links.txt +0 -0
  47. {keras_rs_nightly-0.0.1.dev2025042703 → keras_rs_nightly-0.0.1.dev2025042903}/keras_rs_nightly.egg-info/requires.txt +0 -0
  48. {keras_rs_nightly-0.0.1.dev2025042703 → keras_rs_nightly-0.0.1.dev2025042903}/keras_rs_nightly.egg-info/top_level.txt +0 -0
  49. {keras_rs_nightly-0.0.1.dev2025042703 → keras_rs_nightly-0.0.1.dev2025042903}/pyproject.toml +0 -0
  50. {keras_rs_nightly-0.0.1.dev2025042703 → keras_rs_nightly-0.0.1.dev2025042903}/setup.cfg +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: keras-rs-nightly
-Version: 0.0.1.dev2025042703
+Version: 0.0.1.dev2025042903
 Summary: Multi-backend recommender systems with Keras 3.
 Author-email: Keras team <keras-users@googlegroups.com>
 License: Apache License 2.0
@@ -36,6 +36,80 @@ This library is an extension of the core Keras API; all high-level modules
 receive that same level of polish as core Keras. If you are familiar with Keras,
 congratulations! You already understand most of Keras Recommenders.
 
+## Quick Links
+
+- [Home page](https://keras.io/keras_rs)
+- [Examples](https://keras.io/keras_rs/examples)
+- [API documentation](https://keras.io/keras_rs/api)
+
+## Quickstart
+
+### Train your own cross network
+
+Choose a backend:
+
+```python
+import os
+os.environ["KERAS_BACKEND"] = "jax"  # Or "tensorflow" or "torch"!
+```
+
+Import KerasRS and other libraries:
+
+```python
+import keras
+import keras_rs
+import numpy as np
+```
+
+Define a simple model using the `FeatureCross` layer:
+
+```python
+vocabulary_size = 32
+embedding_dim = 6
+
+inputs = keras.Input(shape=(), name='indices', dtype="int32")
+x0 = keras.layers.Embedding(
+    input_dim=vocabulary_size,
+    output_dim=embedding_dim
+)(inputs)
+x1 = keras_rs.layers.FeatureCross()(x0, x0)
+x2 = keras_rs.layers.FeatureCross()(x0, x1)
+output = keras.layers.Dense(units=10)(x2)
+model = keras.Model(inputs, output)
+```
+
+Compile the model:
+
+```python
+model.compile(
+    loss=keras.losses.MeanSquaredError(),
+    optimizer=keras.optimizers.Adam(learning_rate=3e-4)
+)
+```
+
+Call `model.fit()` on dummy data:
+
+```python
+batch_size = 2
+x = np.random.randint(0, vocabulary_size, size=(batch_size,))
+y = np.random.random(size=(batch_size,))
+model.fit(x, y=y)
+```
+
+### Use ranking losses and metrics
+
+If your task is to rank items in a list, you can make use of the ranking losses
+and metrics which KerasRS provides. Below, we use the pairwise hinge loss and
+track the nDCG metric:
+
+```python
+model.compile(
+    loss=keras_rs.losses.PairwiseHingeLoss(),
+    metrics=[keras_rs.metrics.NDCG()],
+    optimizer=keras.optimizers.Adam(learning_rate=3e-4),
+)
+```
+
 ## Installation
 
 Keras Recommenders is available on PyPI as `keras-rs`:
README.md
@@ -11,6 +11,80 @@ This library is an extension of the core Keras API; all high-level modules
 receive that same level of polish as core Keras. If you are familiar with Keras,
 congratulations! You already understand most of Keras Recommenders.
 
+## Quick Links
+
+- [Home page](https://keras.io/keras_rs)
+- [Examples](https://keras.io/keras_rs/examples)
+- [API documentation](https://keras.io/keras_rs/api)
+
+## Quickstart
+
+### Train your own cross network
+
+Choose a backend:
+
+```python
+import os
+os.environ["KERAS_BACKEND"] = "jax"  # Or "tensorflow" or "torch"!
+```
+
+Import KerasRS and other libraries:
+
+```python
+import keras
+import keras_rs
+import numpy as np
+```
+
+Define a simple model using the `FeatureCross` layer:
+
+```python
+vocabulary_size = 32
+embedding_dim = 6
+
+inputs = keras.Input(shape=(), name='indices', dtype="int32")
+x0 = keras.layers.Embedding(
+    input_dim=vocabulary_size,
+    output_dim=embedding_dim
+)(inputs)
+x1 = keras_rs.layers.FeatureCross()(x0, x0)
+x2 = keras_rs.layers.FeatureCross()(x0, x1)
+output = keras.layers.Dense(units=10)(x2)
+model = keras.Model(inputs, output)
+```
+
+Compile the model:
+
+```python
+model.compile(
+    loss=keras.losses.MeanSquaredError(),
+    optimizer=keras.optimizers.Adam(learning_rate=3e-4)
+)
+```
+
+Call `model.fit()` on dummy data:
+
+```python
+batch_size = 2
+x = np.random.randint(0, vocabulary_size, size=(batch_size,))
+y = np.random.random(size=(batch_size,))
+model.fit(x, y=y)
+```
+
+### Use ranking losses and metrics
+
+If your task is to rank items in a list, you can make use of the ranking losses
+and metrics which KerasRS provides. Below, we use the pairwise hinge loss and
+track the nDCG metric:
+
+```python
+model.compile(
+    loss=keras_rs.losses.PairwiseHingeLoss(),
+    metrics=[keras_rs.metrics.NDCG()],
+    optimizer=keras.optimizers.Adam(learning_rate=3e-4),
+)
+```
+
 ## Installation
 
 Keras Recommenders is available on PyPI as `keras-rs`:
keras_rs/src/layers/feature_interaction/dot_interaction.py
@@ -30,6 +30,53 @@ class DotInteraction(keras.layers.Layer):
             but is much slower.
         **kwargs: Args to pass to the base class.
 
+    Example:
+
+    ```python
+    # 1. Simple forward pass
+    batch_size = 2
+    embedding_dim = 32
+    feature1 = np.random.randn(batch_size, embedding_dim)
+    feature2 = np.random.randn(batch_size, embedding_dim)
+    feature3 = np.random.randn(batch_size, embedding_dim)
+    feature_interactions = keras_rs.layers.DotInteraction()(
+        [feature1, feature2, feature3]
+    )
+
+    # 2. After embedding layer in a model
+    vocabulary_size = 32
+    embedding_dim = 6
+
+    # Create a simple model containing the layer.
+    feature_input_1 = keras.Input(shape=(), name='indices_1', dtype="int32")
+    feature_input_2 = keras.Input(shape=(), name='indices_2', dtype="int32")
+    feature_input_3 = keras.Input(shape=(), name='indices_3', dtype="int32")
+    x1 = keras.layers.Embedding(
+        input_dim=vocabulary_size,
+        output_dim=embedding_dim
+    )(feature_input_1)
+    x2 = keras.layers.Embedding(
+        input_dim=vocabulary_size,
+        output_dim=embedding_dim
+    )(feature_input_2)
+    x3 = keras.layers.Embedding(
+        input_dim=vocabulary_size,
+        output_dim=embedding_dim
+    )(feature_input_3)
+    feature_interactions = keras_rs.layers.DotInteraction()([x1, x2, x3])
+    output = keras.layers.Dense(units=10)(feature_interactions)
+    model = keras.Model(
+        [feature_input_1, feature_input_2, feature_input_3], output
+    )
+
+    # Call the model on the inputs.
+    batch_size = 2
+    f1 = np.random.randint(0, vocabulary_size, size=(batch_size,))
+    f2 = np.random.randint(0, vocabulary_size, size=(batch_size,))
+    f3 = np.random.randint(0, vocabulary_size, size=(batch_size,))
+    outputs = model([f1, f2, f3])
+    ```
+
     References:
         - [M. Naumov et al.](https://arxiv.org/abs/1906.00091)
     """
keras_rs/src/layers/feature_interaction/feature_cross.py
@@ -57,13 +57,32 @@ class FeatureCross(keras.layers.Layer):
     Example:
 
     ```python
-    # after embedding layer in a functional model
-    input = keras.Input(shape=(), name='indices', dtype="int64")
-    x0 = keras.layers.Embedding(input_dim=32, output_dim=6)(x0)
-    x1 = FeatureCross()(x0, x0)
-    x2 = FeatureCross()(x0, x1)
+    # 1. Simple forward pass
+    batch_size = 2
+    embedding_dim = 32
+    feature1 = np.random.randn(batch_size, embedding_dim)
+    feature2 = np.random.randn(batch_size, embedding_dim)
+    crossed_features = keras_rs.layers.FeatureCross()(feature1, feature2)
+
+    # 2. After embedding layer in a model
+    vocabulary_size = 32
+    embedding_dim = 6
+
+    # Create a simple model containing the layer.
+    inputs = keras.Input(shape=(), name='indices', dtype="int32")
+    x0 = keras.layers.Embedding(
+        input_dim=vocabulary_size,
+        output_dim=embedding_dim
+    )(inputs)
+    x1 = keras_rs.layers.FeatureCross()(x0, x0)
+    x2 = keras_rs.layers.FeatureCross()(x0, x1)
     logits = keras.layers.Dense(units=10)(x2)
-    model = keras.Model(input, logits)
+    model = keras.Model(inputs, logits)
+
+    # Call the model on the inputs.
+    batch_size = 2
+    input_data = np.random.randint(0, vocabulary_size, size=(batch_size,))
+    outputs = model(input_data)
     ```
 
     References:
keras_rs/src/layers/retrieval/hard_negative_mining.py
@@ -12,11 +12,27 @@ MAX_FLOAT = ml_dtypes.finfo("float32").max / 100.0
 
 @keras_rs_export("keras_rs.layers.HardNegativeMining")
 class HardNegativeMining(keras.layers.Layer):
-    """Transforms logits and labels to return hard negatives.
+    """Filter logits and labels to return hard negatives.
+
+    The output will include logits and labels for the requested number of hard
+    negatives as well as the positive candidate.
 
     Args:
         num_hard_negatives: How many hard negatives to return.
         **kwargs: Args to pass to the base class.
+
+    Example:
+
+    ```python
+    # Create the layer with the configured number of hard negatives to mine.
+    hard_negative_mining = keras_rs.layers.HardNegativeMining(
+        num_hard_negatives=10
+    )
+
+    # This will retrieve the top 10 negative candidates plus the positive
+    # candidate from `labels` for each row.
+    out_logits, out_labels = hard_negative_mining(in_logits, in_labels)
+    ```
     """
 
     def __init__(self, num_hard_negatives: int, **kwargs: Any) -> None:
@@ -33,12 +49,13 @@ class HardNegativeMining(keras.layers.Layer):
         negatives as well as the positive candidate.
 
         Args:
-            logits: logits tensor, typically `[batch_size, num_candidates]` but
-                can have more dimensions or be 1D as `[num_candidates]`.
-            labels: one-hot labels tensor, must be the same shape as `logits`.
+            logits: The logits tensor, typically `[batch_size, num_candidates]`
+                but can have more dimensions or be 1D as `[num_candidates]`.
+            labels: The one-hot labels tensor, must be the same shape as
+                `logits`.
 
         Returns:
-            tuple containing two tensors with the last dimension of
+            A tuple containing two tensors with the last dimension of
             `num_candidates` replaced with `num_hard_negatives + 1`.
             - logits: `[..., num_hard_negatives + 1]` tensor of logits.
             - labels: `[..., num_hard_negatives + 1]` one-hot tensor of labels.
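
A sketch of the mining step this docstring describes: keep the positive candidate plus the `num_hard_negatives` highest-scoring negatives in each row. Tie-breaking and the position of the positive in the output are assumptions here, not guarantees of the layer.

```python
import numpy as np

def hard_negative_mining(logits, labels, num_hard_negatives):
    # Mask out the positive so it cannot also be picked as a negative.
    masked = np.where(labels > 0, -np.inf, logits)
    # Indices of the highest-scoring negatives, with the positive prepended.
    neg_idx = np.argsort(-masked, axis=1)[:, :num_hard_negatives]
    pos_idx = np.argmax(labels, axis=1)[:, None]
    idx = np.concatenate([pos_idx, neg_idx], axis=1)
    rows = np.arange(logits.shape[0])[:, None]
    return logits[rows, idx], labels[rows, idx]

logits = np.array([[0.1, 2.0, 0.5, 1.5], [3.0, 0.2, 0.4, 0.1]])
labels = np.array([[0.0, 0.0, 1.0, 0.0], [1.0, 0.0, 0.0, 0.0]])
out_logits, out_labels = hard_negative_mining(logits, labels, 2)
print(out_logits)  # [[0.5 2.  1.5] [3.  0.4 0.2]]
print(out_labels)  # [[1. 0. 0.] [1. 0. 0.]]
```
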
keras_rs/src/layers/retrieval/remove_accidental_hits.py
@@ -15,6 +15,18 @@ class RemoveAccidentalHits(keras.layers.Layer):
 
     Zeroes the logits of negative candidates that have the same ID as the
     positive candidate in that row.
+
+    Example:
+
+    ```python
+    # Create the layer.
+    remove_accidental_hits = keras_rs.layers.RemoveAccidentalHits()
+
+    # This will zero the logits of negative candidates that have the same ID
+    # as the positive candidate from `labels`, so as to not negatively impact
+    # the true positive.
+    logits = remove_accidental_hits(logits, labels, candidate_ids)
+    ```
     """
 
     def call(
@@ -29,16 +41,17 @@ class RemoveAccidentalHits(keras.layers.Layer):
         have the same ID as the positive candidate in that row.
 
         Args:
-            logits: logits tensor, typically `[batch_size, num_candidates]` but
-                can have more dimensions or be 1D as `[num_candidates]`.
-            labels: one-hot labels tensor, must be the same shape as `logits`.
-            candidate_ids: candidate identifiers tensor, can be
+            logits: The logits tensor, typically `[batch_size, num_candidates]`
+                but can have more dimensions or be 1D as `[num_candidates]`.
+            labels: The one-hot labels tensor, must be the same shape as
+                `logits`.
+            candidate_ids: The candidate identifiers tensor, can be
                 `[num_candidates]` or `[batch_size, num_candidates]` or have
                 more dimensions as long as they match the last dimensions of
                 `labels`.
 
         Returns:
-            logits: Modified logits with the same shape as the input logits.
+            logits: The modified logits with the same shape as the input logits.
         """
         # A more principled way is to implement
         # `softmax_cross_entropy_with_logits` with a input mask. Here we
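
A sketch of the accidental-hit rule in plain NumPy, following the docstring's wording that the layer zeroes the affected logits. The case of one `candidate_ids` vector shared across the batch is the simplest; treat the exact masking value and broadcasting behavior as assumptions.

```python
import numpy as np

def remove_accidental_hits(logits, labels, candidate_ids):
    # ID of the positive candidate in each row.
    positive_ids = candidate_ids[np.argmax(labels, axis=1)]
    # A negative is an accidental hit when it shares the positive's ID.
    accidental = (
        (candidate_ids[None, :] == positive_ids[:, None]) & (labels == 0)
    )
    return np.where(accidental, 0.0, logits)

logits = np.array([[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]])
labels = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
candidate_ids = np.array([7, 7, 9])  # candidates 0 and 1 are the same item
print(remove_accidental_hits(logits, labels, candidate_ids))
# [[1. 0. 3.] [0. 2. 3.]]: each row's duplicate of its positive is zeroed
```
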
keras_rs/src/layers/retrieval/sampling_probability_correction.py
@@ -17,6 +17,18 @@ class SamplingProbabilityCorrection(keras.layers.Layer):
         epsilon: float. Small float added to sampling probability to avoid
             taking the log of zero. Defaults to 1e-6.
         **kwargs: Args to pass to the base class.
+
+    Example:
+
+    ```python
+    # Create the layer.
+    sampling_probability_correction = (
+        keras_rs.layers.SamplingProbabilityCorrection()
+    )
+
+    # Correct the logits based on the provided candidate sampling probability.
+    logits = sampling_probability_correction(logits, probabilities)
+    ```
     """
 
     def __init__(self, epsilon: float = 1e-6, **kwargs: Any) -> None:
@@ -32,11 +44,13 @@ class SamplingProbabilityCorrection(keras.layers.Layer):
         """Corrects input logits to account for candidate sampling probability.
 
         Args:
-            logits: The logits to correct.
-            candidate_sampling_probability: The sampling probability.
+            logits: The logits tensor to correct, typically
+                `[batch_size, num_candidates]` but can have more dimensions or
+                be 1D as `[num_candidates]`.
+            candidate_sampling_probability: The sampling probability with the
+                same shape as `logits`.
 
-        Returns:
-            The corrected logits.
+        Returns: The corrected logits with the same shape as the input logits.
         """
         return logits - ops.log(
             ops.clip(candidate_sampling_probability, self.epsilon, 1.0)
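
The correction itself is visible in the hunk: `logits - log(clip(p, epsilon, 1.0))`. A small numeric example of its effect, using NumPy in place of `keras.ops`:

```python
import numpy as np

def sampling_probability_correction(logits, probs, epsilon=1e-6):
    # Mirrors the layer's formula; frequently sampled candidates (high p)
    # receive a smaller boost than rarely sampled ones.
    return logits - np.log(np.clip(probs, epsilon, 1.0))

logits = np.array([[2.0, 2.0, 2.0]])
probs = np.array([[0.5, 0.1, 0.01]])  # candidate sampling probabilities
print(sampling_probability_correction(logits, probs))
# approx [[2.693 4.303 6.605]]: rarer candidates get a larger boost
```
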
keras_rs/src/losses/pairwise_hinge_loss.py (new file)
@@ -0,0 +1,87 @@
+from keras import ops
+
+from keras_rs.src import types
+from keras_rs.src.api_export import keras_rs_export
+from keras_rs.src.losses.pairwise_loss import PairwiseLoss
+from keras_rs.src.losses.pairwise_loss import pairwise_loss_subclass_doc_string
+
+
+@keras_rs_export("keras_rs.losses.PairwiseHingeLoss")
+class PairwiseHingeLoss(PairwiseLoss):
+    def pairwise_loss(self, pairwise_logits: types.Tensor) -> types.Tensor:
+        return ops.relu(ops.subtract(ops.array(1), pairwise_logits))
+
+
+formula = "loss = sum_{i} sum_{j} I(y_i > y_j) * max(0, 1 - (s_i - s_j))"
+explanation = """
+- `max(0, 1 - (s_i - s_j))` is the hinge loss, which penalizes cases where
+  the score difference `s_i - s_j` is not sufficiently large when
+  `y_i > y_j`.
+"""
+extra_args = ""
+example = """
+    1. With `compile()` API:
+
+    ```python
+    model.compile(
+        loss=keras_rs.losses.PairwiseHingeLoss(),
+        ...
+    )
+    ```
+
+    2. As a standalone function:
+    2.1. Unbatched inputs
+    >>> y_true = np.array([1.0, 0.0, 1.0, 3.0, 2.0])
+    >>> y_pred = np.array([1.0, 3.0, 2.0, 4.0, 0.8])
+    >>> pairwise_hinge_loss = keras_rs.losses.PairwiseHingeLoss()
+    >>> pairwise_hinge_loss(y_true=y_true, y_pred=y_pred)
+    2.32000
+
+    2.2. Batched inputs
+    2.2.1. Using default `'auto'`/`'sum_over_batch_size'` reduction
+    >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
+    >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+    >>> pairwise_hinge_loss = keras_rs.losses.PairwiseHingeLoss()
+    >>> pairwise_hinge_loss(y_true=y_true, y_pred=y_pred)
+    0.75
+
+    2.2.2. With masked inputs (useful for ragged inputs)
+    >>> y_true = {
+    ...     "labels": np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]]),
+    ...     "mask": np.array(
+    ...         [[True, True, True, True], [True, True, False, False]]
+    ...     ),
+    ... }
+    >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+    >>> pairwise_hinge_loss(y_true=y_true, y_pred=y_pred)
+    0.64999
+
+    2.2.3. With `sample_weight`
+    >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
+    >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+    >>> sample_weight = np.array(
+    ...     [[2.0, 3.0, 1.0, 1.0], [2.0, 1.0, 0.0, 0.0]]
+    ... )
+    >>> pairwise_hinge_loss = keras_rs.losses.PairwiseHingeLoss()
+    >>> pairwise_hinge_loss(
+    ...     y_true=y_true, y_pred=y_pred, sample_weight=sample_weight
+    ... )
+    1.02499
+
+    2.2.4. Using `'none'` reduction
+    >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
+    >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+    >>> pairwise_hinge_loss = keras_rs.losses.PairwiseHingeLoss(
+    ...     reduction="none"
+    ... )
+    >>> pairwise_hinge_loss(y_true=y_true, y_pred=y_pred)
+    [[3., 0., 2., 0.], [0., 0.20000005, 0.79999995, 0.]]
+"""
+
+PairwiseHingeLoss.__doc__ = pairwise_loss_subclass_doc_string.format(
+    loss_name="hinge loss",
+    formula=formula,
+    explanation=explanation,
+    extra_args=extra_args,
+    example=example,
+)
keras_rs/src/losses/pairwise_logistic_loss.py (new file)
@@ -0,0 +1,96 @@
+from keras import ops
+
+from keras_rs.src import types
+from keras_rs.src.api_export import keras_rs_export
+from keras_rs.src.losses.pairwise_loss import PairwiseLoss
+from keras_rs.src.losses.pairwise_loss import pairwise_loss_subclass_doc_string
+
+
+@keras_rs_export("keras_rs.losses.PairwiseLogisticLoss")
+class PairwiseLogisticLoss(PairwiseLoss):
+    def pairwise_loss(self, pairwise_logits: types.Tensor) -> types.Tensor:
+        return ops.add(
+            ops.relu(ops.negative(pairwise_logits)),
+            ops.log(
+                ops.add(
+                    ops.array(1),
+                    ops.exp(ops.negative(ops.abs(pairwise_logits))),
+                )
+            ),
+        )
+
+
+formula = "loss = sum_{i} sum_{j} I(y_i > y_j) * log(1 + exp(-(s_i - s_j)))"
+explanation = """
+- `log(1 + exp(-(s_i - s_j)))` is the logistic loss, which penalizes
+  cases where the score difference `s_i - s_j` is not sufficiently large
+  when `y_i > y_j`. This function provides a smooth approximation of the
+  ideal step function, making it suitable for gradient-based optimization.
+"""
+extra_args = ""
+example = """
+    1. With `compile()` API:
+
+    ```python
+    model.compile(
+        loss=keras_rs.losses.PairwiseLogisticLoss(),
+        ...
+    )
+    ```
+
+    2. As a standalone function:
+    2.1. Unbatched inputs
+    >>> y_true = np.array([1.0, 0.0, 1.0, 3.0, 2.0])
+    >>> y_pred = np.array([1.0, 3.0, 2.0, 4.0, 0.8])
+    >>> pairwise_logistic_loss = keras_rs.losses.PairwiseLogisticLoss()
+    >>> pairwise_logistic_loss(y_true=y_true, y_pred=y_pred)
+    1.70708
+
+    2.2. Batched inputs
+    2.2.1. Using default `'auto'`/`'sum_over_batch_size'` reduction
+    >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
+    >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+    >>> pairwise_logistic_loss = keras_rs.losses.PairwiseLogisticLoss()
+    >>> pairwise_logistic_loss(y_true=y_true, y_pred=y_pred)
+    0.73936
+
+    2.2.2. With masked inputs (useful for ragged inputs)
+    >>> y_true = {
+    ...     "labels": np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]]),
+    ...     "mask": np.array(
+    ...         [[True, True, True, True], [True, True, False, False]]
+    ...     ),
+    ... }
+    >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+    >>> pairwise_logistic_loss(y_true=y_true, y_pred=y_pred)
+    0.53751
+
+    2.2.3. With `sample_weight`
+    >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
+    >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+    >>> sample_weight = np.array(
+    ...     [[2.0, 3.0, 1.0, 1.0], [2.0, 1.0, 0.0, 0.0]]
+    ... )
+    >>> pairwise_logistic_loss = keras_rs.losses.PairwiseLogisticLoss()
+    >>> pairwise_logistic_loss(
+    ...     y_true=y_true, y_pred=y_pred, sample_weight=sample_weight
+    ... )
+    0.80337
+
+    2.2.4. Using `'none'` reduction
+    >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
+    >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+    >>> pairwise_logistic_loss = keras_rs.losses.PairwiseLogisticLoss(
+    ...     reduction="none"
+    ... )
+    >>> pairwise_logistic_loss(y_true=y_true, y_pred=y_pred)
+    [[2.126928, 0., 1.3132616, 0.48877698], [0., 0.20000005, 0.79999995, 0.]]
+"""
+
+PairwiseLogisticLoss.__doc__ = pairwise_loss_subclass_doc_string.format(
+    loss_name="logistic loss",
+    formula=formula,
+    explanation=explanation,
+    extra_args=extra_args,
+    example=example,
+)
keras_rs/src/losses/pairwise_loss.py
@@ -154,5 +154,7 @@ pairwise_loss_subclass_doc_string = (
             `"float32"` unless set to different value
             (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
             provided, then the `compute_dtype` will be utilized.
-    """
+
+    Examples:
+    {example}"""
 )
keras_rs/src/losses/pairwise_mean_squared_error.py
@@ -64,9 +64,69 @@ explanation = """
   predicted order of items relative to their true order.
 """
 extra_args = ""
+example = """
+    1. With `compile()` API:
+
+    ```python
+    model.compile(
+        loss=keras_rs.losses.PairwiseMeanSquaredError(),
+        ...
+    )
+    ```
+
+    2. As a standalone function:
+    2.1. Unbatched inputs
+    >>> y_true = np.array([1.0, 0.0, 1.0, 3.0, 2.0])
+    >>> y_pred = np.array([1.0, 3.0, 2.0, 4.0, 0.8])
+    >>> pairwise_mse = keras_rs.losses.PairwiseMeanSquaredError()
+    >>> pairwise_mse(y_true=y_true, y_pred=y_pred)
+    19.10400
+
+    2.2. Batched inputs
+    2.2.1. Using default `'auto'`/`'sum_over_batch_size'` reduction
+    >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
+    >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+    >>> pairwise_mse = keras_rs.losses.PairwiseMeanSquaredError()
+    >>> pairwise_mse(y_true=y_true, y_pred=y_pred)
+    5.57999
+
+    2.2.2. With masked inputs (useful for ragged inputs)
+    >>> y_true = {
+    ...     "labels": np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]]),
+    ...     "mask": np.array(
+    ...         [[True, True, True, True], [True, True, False, False]]
+    ...     ),
+    ... }
+    >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+    >>> pairwise_mse(y_true=y_true, y_pred=y_pred)
+    4.76000
+
+    2.2.3. With `sample_weight`
+    >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
+    >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+    >>> sample_weight = np.array(
+    ...     [[2.0, 3.0, 1.0, 1.0], [2.0, 1.0, 0.0, 0.0]]
+    ... )
+    >>> pairwise_mse = keras_rs.losses.PairwiseMeanSquaredError()
+    >>> pairwise_mse(
+    ...     y_true=y_true, y_pred=y_pred, sample_weight=sample_weight
+    ... )
+    11.0500
+
+    2.2.4. Using `'none'` reduction
+    >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
+    >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+    >>> pairwise_mse = keras_rs.losses.PairwiseMeanSquaredError(
+    ...     reduction="none"
+    ... )
+    >>> pairwise_mse(y_true=y_true, y_pred=y_pred)
+    [[11., 17., 5., 5.], [2.04, 1.3199998, 1.6399999, 1.6399999]]
+"""
+
 PairwiseMeanSquaredError.__doc__ = pairwise_loss_subclass_doc_string.format(
     loss_name="mean squared error",
     formula=formula,
     explanation=explanation,
     extra_args=extra_args,
+    example=example,
 )
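
The `'none'`-reduction output above pins down the per-item computation: for each list item `i`, sum `((y_i - y_j) - (s_i - s_j))^2` over all `j`. The general formula lives elsewhere in this file and is not shown in the hunk, so this NumPy check is inferred from the printed outputs, which it reproduces exactly:

```python
import numpy as np

y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])

label_diffs = y_true[:, :, None] - y_true[:, None, :]  # y_i - y_j
logit_diffs = y_pred[:, :, None] - y_pred[:, None, :]  # s_i - s_j
per_item = np.sum((label_diffs - logit_diffs) ** 2, axis=-1)

print(per_item)  # [[11. 17. 5. 5.] [2.04 1.32 1.64 1.64]]
```
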