keras_rs_nightly-0.0.1.dev2025042803-py3-none-any.whl → keras_rs_nightly-0.0.1.dev2025042903-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.


keras_rs/src/layers/feature_interaction/dot_interaction.py CHANGED
@@ -30,6 +30,53 @@ class DotInteraction(keras.layers.Layer):
          but is much slower.
      **kwargs: Args to pass to the base class.
 
+ Example:
+
+ ```python
+ # 1. Simple forward pass
+ batch_size = 2
+ embedding_dim = 32
+ feature1 = np.random.randn(batch_size, embedding_dim)
+ feature2 = np.random.randn(batch_size, embedding_dim)
+ feature3 = np.random.randn(batch_size, embedding_dim)
+ feature_interactions = keras_rs.layers.DotInteraction()(
+     [feature1, feature2, feature3]
+ )
+
+ # 2. After embedding layer in a model
+ vocabulary_size = 32
+ embedding_dim = 6
+
+ # Create a simple model containing the layer.
+ feature_input_1 = keras.Input(shape=(), name='indices_1', dtype="int32")
+ feature_input_2 = keras.Input(shape=(), name='indices_2', dtype="int32")
+ feature_input_3 = keras.Input(shape=(), name='indices_3', dtype="int32")
+ x1 = keras.layers.Embedding(
+     input_dim=vocabulary_size,
+     output_dim=embedding_dim
+ )(feature_input_1)
+ x2 = keras.layers.Embedding(
+     input_dim=vocabulary_size,
+     output_dim=embedding_dim
+ )(feature_input_2)
+ x3 = keras.layers.Embedding(
+     input_dim=vocabulary_size,
+     output_dim=embedding_dim
+ )(feature_input_3)
+ feature_interactions = keras_rs.layers.DotInteraction()([x1, x2, x3])
+ output = keras.layers.Dense(units=10)(feature_interactions)
+ model = keras.Model(
+     [feature_input_1, feature_input_2, feature_input_3], output
+ )
+
+ # Call the model on the inputs.
+ batch_size = 2
+ f1 = np.random.randint(0, vocabulary_size, size=(batch_size,))
+ f2 = np.random.randint(0, vocabulary_size, size=(batch_size,))
+ f3 = np.random.randint(0, vocabulary_size, size=(batch_size,))
+ outputs = model([f1, f2, f3])
+ ```
+
  References:
      - [M. Naumov et al.](https://arxiv.org/abs/1906.00091)
  """
keras_rs/src/layers/feature_interaction/feature_cross.py CHANGED
@@ -57,13 +57,32 @@ class FeatureCross(keras.layers.Layer):
  Example:
 
  ```python
- # after embedding layer in a functional model
- input = keras.Input(shape=(), name='indices', dtype="int64")
- x0 = keras.layers.Embedding(input_dim=32, output_dim=6)(x0)
- x1 = FeatureCross()(x0, x0)
- x2 = FeatureCross()(x0, x1)
+ # 1. Simple forward pass
+ batch_size = 2
+ embedding_dim = 32
+ feature1 = np.random.randn(batch_size, embedding_dim)
+ feature2 = np.random.randn(batch_size, embedding_dim)
+ crossed_features = keras_rs.layers.FeatureCross()(feature1, feature2)
+
+ # 2. After embedding layer in a model
+ vocabulary_size = 32
+ embedding_dim = 6
+
+ # Create a simple model containing the layer.
+ inputs = keras.Input(shape=(), name='indices', dtype="int32")
+ x0 = keras.layers.Embedding(
+     input_dim=vocabulary_size,
+     output_dim=embedding_dim
+ )(inputs)
+ x1 = keras_rs.layers.FeatureCross()(x0, x0)
+ x2 = keras_rs.layers.FeatureCross()(x0, x1)
  logits = keras.layers.Dense(units=10)(x2)
- model = keras.Model(input, logits)
+ model = keras.Model(inputs, logits)
+
+ # Call the model on the inputs.
+ batch_size = 2
+ input_data = np.random.randint(0, vocabulary_size, size=(batch_size,))
+ outputs = model(input_data)
  ```
 
  References:
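The cross operation itself can be sketched in NumPy, assuming the DCN-style update `x_next = x0 * (x @ W + b) + x` from the cross-network literature this layer follows; the weights below are random stand-ins for what each real layer learns:

```python
import numpy as np

rng = np.random.default_rng(0)
batch_size, dim = 2, 6
x0 = rng.normal(size=(batch_size, dim))   # embedded base features

def feature_cross(x0, x, W, b):
    # Assumed DCN-style cross: elementwise product with x0, plus residual.
    return x0 * (x @ W + b) + x

W1, W2 = rng.normal(size=(dim, dim)), rng.normal(size=(dim, dim))
b1, b2 = np.zeros(dim), np.zeros(dim)
x1 = feature_cross(x0, x0, W1, b1)        # like FeatureCross()(x0, x0)
x2 = feature_cross(x0, x1, W2, b2)        # like FeatureCross()(x0, x1)
```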
keras_rs/src/layers/retrieval/hard_negative_mining.py CHANGED
@@ -12,11 +12,27 @@ MAX_FLOAT = ml_dtypes.finfo("float32").max / 100.0
 
  @keras_rs_export("keras_rs.layers.HardNegativeMining")
  class HardNegativeMining(keras.layers.Layer):
- """Transforms logits and labels to return hard negatives.
+ """Filter logits and labels to return hard negatives.
+
+ The output will include logits and labels for the requested number of hard
+ negatives as well as the positive candidate.
 
  Args:
      num_hard_negatives: How many hard negatives to return.
      **kwargs: Args to pass to the base class.
+
+ Example:
+
+ ```python
+ # Create layer with the configured number of hard negatives to mine.
+ hard_negative_mining = keras_rs.layers.HardNegativeMining(
+     num_hard_negatives=10
+ )
+
+ # This will retrieve the top 10 negative candidates plus the positive
+ # candidate from `labels` for each row.
+ out_logits, out_labels = hard_negative_mining(in_logits, in_labels)
+ ```
  """
 
  def __init__(self, num_hard_negatives: int, **kwargs: Any) -> None:
@@ -33,12 +49,13 @@ class HardNegativeMining(keras.layers.Layer):
      negatives as well as the positive candidate.
 
      Args:
-         logits: logits tensor, typically `[batch_size, num_candidates]` but
-             can have more dimensions or be 1D as `[num_candidates]`.
-         labels: one-hot labels tensor, must be the same shape as `logits`.
+         logits: The logits tensor, typically `[batch_size, num_candidates]`
+             but can have more dimensions or be 1D as `[num_candidates]`.
+         labels: The one-hot labels tensor, must be the same shape as
+             `logits`.
 
      Returns:
-         tuple containing two tensors with the last dimension of
+         A tuple containing two tensors with the last dimension of
          `num_candidates` replaced with `num_hard_negatives + 1`.
          - logits: `[..., num_hard_negatives + 1]` tensor of logits.
          - labels: `[..., num_hard_negatives + 1]` one-hot tensor of labels.
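The transformation this docstring promises is easy to trace in plain NumPy: keep the positive candidate plus the highest-scoring negatives. A sketch (illustrative only, not the library's code):

```python
import numpy as np

num_hard_negatives = 2
logits = np.array([[0.1, 2.0, 1.5, -0.3, 0.9]])
labels = np.array([[0.0, 0.0, 1.0, 0.0, 0.0]])     # one-hot positives

# Push the positive's logit to -inf so top-k selects only negatives.
masked = np.where(labels > 0, -np.inf, logits)
neg_idx = np.argsort(-masked, axis=1)[:, :num_hard_negatives]
pos_idx = np.argmax(labels, axis=1, keepdims=True)
keep = np.concatenate([pos_idx, neg_idx], axis=1)

out_logits = np.take_along_axis(logits, keep, axis=1)  # [[1.5, 2.0, 0.9]]
out_labels = np.take_along_axis(labels, keep, axis=1)  # [[1.0, 0.0, 0.0]]
```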
keras_rs/src/layers/retrieval/remove_accidental_hits.py CHANGED
@@ -15,6 +15,18 @@ class RemoveAccidentalHits(keras.layers.Layer):
 
  Zeroes the logits of negative candidates that have the same ID as the
  positive candidate in that row.
+
+ Example:
+
+ ```python
+ # Create the layer.
+ remove_accidental_hits = keras_rs.layers.RemoveAccidentalHits()
+
+ # This will zero the logits of negative candidates that have the same ID as
+ # the positive candidate from `labels` so as to not negatively impact the
+ # true positive.
+ logits = remove_accidental_hits(logits, labels, candidate_ids)
+ ```
  """
 
  def call(
@@ -29,16 +41,17 @@ class RemoveAccidentalHits(keras.layers.Layer):
      have the same ID as the positive candidate in that row.
 
      Args:
-         logits: logits tensor, typically `[batch_size, num_candidates]` but
-             can have more dimensions or be 1D as `[num_candidates]`.
-         labels: one-hot labels tensor, must be the same shape as `logits`.
-         candidate_ids: candidate identifiers tensor, can be
+         logits: The logits tensor, typically `[batch_size, num_candidates]`
+             but can have more dimensions or be 1D as `[num_candidates]`.
+         labels: The one-hot labels tensor, must be the same shape as
+             `logits`.
+         candidate_ids: The candidate identifiers tensor, can be
              `[num_candidates]` or `[batch_size, num_candidates]` or have
              more dimensions as long as they match the last dimensions of
              `labels`.
 
      Returns:
-         logits: Modified logits with the same shape as the input logits.
+         logits: The modified logits with the same shape as the input logits.
      """
      # A more principled way is to implement
      # `softmax_cross_entropy_with_logits` with a input mask. Here we
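A NumPy sketch of the behavior described above: negatives that share the positive candidate's ID are masked out with a large negative value so they drop out of a downstream softmax (an illustration; the actual implementation may differ in detail):

```python
import numpy as np

logits = np.array([[2.0, 1.0, 0.5, 1.5]])
labels = np.array([[1.0, 0.0, 0.0, 0.0]])      # column 0 is the positive
candidate_ids = np.array([7, 3, 7, 5])         # candidate 2 repeats ID 7

positive_ids = candidate_ids[np.argmax(labels, axis=1)]        # [7]
accidental = (candidate_ids[None, :] == positive_ids[:, None]) & (labels == 0)
logits = np.where(accidental, -1e9, logits)    # [[2.0, 1.0, -1e9, 1.5]]
```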
keras_rs/src/layers/retrieval/sampling_probability_correction.py CHANGED
@@ -17,6 +17,18 @@ class SamplingProbabilityCorrection(keras.layers.Layer):
      epsilon: float. Small float added to sampling probability to avoid
          taking the log of zero. Defaults to 1e-6.
      **kwargs: Args to pass to the base class.
+
+ Example:
+
+ ```python
+ # Create the layer.
+ sampling_probability_correction = (
+     keras_rs.layers.SamplingProbabilityCorrection()
+ )
+
+ # Correct the logits based on the provided candidate sampling probability.
+ logits = sampling_probability_correction(logits, probabilities)
+ ```
  """
 
  def __init__(self, epsilon: float = 1e-6, **kwargs: Any) -> None:
@@ -32,11 +44,13 @@ class SamplingProbabilityCorrection(keras.layers.Layer):
  """Corrects input logits to account for candidate sampling probability.
 
      Args:
-         logits: The logits to correct.
-         candidate_sampling_probability: The sampling probability.
+         logits: The logits tensor to correct, typically
+             `[batch_size, num_candidates]` but can have more dimensions or
+             be 1D as `[num_candidates]`.
+         candidate_sampling_probability: The sampling probability with the
+             same shape as `logits`.
 
-     Returns:
-         The corrected logits.
+     Returns: The corrected logits with the same shape as the input logits.
      """
      return logits - ops.log(
          ops.clip(candidate_sampling_probability, self.epsilon, 1.0)
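The return expression above translates directly to NumPy; a tiny worked example of this logQ-style correction:

```python
import numpy as np

epsilon = 1e-6
logits = np.array([1.0, 2.0, 0.5])
candidate_sampling_probability = np.array([0.5, 0.01, 1.0])

corrected = logits - np.log(
    np.clip(candidate_sampling_probability, epsilon, 1.0)
)
# corrected ≈ [1.693, 6.605, 0.5]: rarely sampled candidates get a boost.
```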
keras_rs/src/losses/pairwise_hinge_loss.py CHANGED
@@ -19,9 +19,69 @@ explanation = """
      `y_i > y_j`.
  """
  extra_args = ""
+ example = """
+ 1. With `compile()` API:
+
+ ```python
+ model.compile(
+     loss=keras_rs.losses.PairwiseHingeLoss(),
+     ...
+ )
+ ```
+
+ 2. As a standalone function:
+ 2.1. Unbatched inputs
+ >>> y_true = np.array([1.0, 0.0, 1.0, 3.0, 2.0])
+ >>> y_pred = np.array([1.0, 3.0, 2.0, 4.0, 0.8])
+ >>> pairwise_hinge_loss = keras_rs.losses.PairwiseHingeLoss()
+ >>> pairwise_hinge_loss(y_true=y_true, y_pred=y_pred)
+ 2.32000
+
+ 2.2 Batched inputs
+ 2.2.1 Using default 'auto'/'sum_over_batch_size' reduction.
+ >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
+ >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+ >>> pairwise_hinge_loss = keras_rs.losses.PairwiseHingeLoss()
+ >>> pairwise_hinge_loss(y_true=y_true, y_pred=y_pred)
+ 0.75
+
+ 2.2.2. With masked inputs (useful for ragged inputs)
+ >>> y_true = {
+ ...     "labels": np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]]),
+ ...     "mask": np.array(
+ ...         [[True, True, True, True], [True, True, False, False]]
+ ...     ),
+ ... }
+ >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+ >>> pairwise_hinge_loss(y_true=y_true, y_pred=y_pred)
+ 0.64999
+
+ 2.2.3 With `sample_weight`
+ >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
+ >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+ >>> sample_weight = np.array(
+ ...     [[2.0, 3.0, 1.0, 1.0], [2.0, 1.0, 0.0, 0.0]]
+ ... )
+ >>> pairwise_hinge_loss = keras_rs.losses.PairwiseHingeLoss()
+ >>> pairwise_hinge_loss(
+ ...     y_true=y_true, y_pred=y_pred, sample_weight=sample_weight
+ ... )
+ 1.02499
+
+ 2.2.4 Using `'none'` reduction.
+ >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
+ >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+ >>> pairwise_hinge_loss = keras_rs.losses.PairwiseHingeLoss(
+ ...     reduction="none"
+ ... )
+ >>> pairwise_hinge_loss(y_true=y_true, y_pred=y_pred)
+ [[3. , 0. , 2. , 0.], [0., 0.20000005, 0.79999995, 0.]]
+ """
+
  PairwiseHingeLoss.__doc__ = pairwise_loss_subclass_doc_string.format(
      loss_name="hinge loss",
      formula=formula,
      explanation=explanation,
      extra_args=extra_args,
+     example=example,
  )
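For reference, the unbatched value `2.32000` above follows directly from the pairwise hinge formula; a NumPy check, assuming (as the printed outputs imply) that pair losses are summed per item and then averaged over the list:

```python
import numpy as np

y_true = np.array([1.0, 0.0, 1.0, 3.0, 2.0])
y_pred = np.array([1.0, 3.0, 2.0, 4.0, 0.8])

# For every ordered pair with y_i > y_j, penalize max(0, 1 - (s_i - s_j)).
valid = y_true[:, None] > y_true[None, :]
margins = y_pred[:, None] - y_pred[None, :]
pair_loss = np.where(valid, np.maximum(0.0, 1.0 - margins), 0.0)

print(pair_loss.sum(axis=1).mean())   # 2.32, matching the docstring above
```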
keras_rs/src/losses/pairwise_logistic_loss.py CHANGED
@@ -28,9 +28,69 @@ explanation = """
      ideal step function, making it suitable for gradient-based optimization.
  """
  extra_args = ""
+ example = """
+ 1. With `compile()` API:
+
+ ```python
+ model.compile(
+     loss=keras_rs.losses.PairwiseLogisticLoss(),
+     ...
+ )
+ ```
+
+ 2. As a standalone function:
+ 2.1. Unbatched inputs
+ >>> y_true = np.array([1.0, 0.0, 1.0, 3.0, 2.0])
+ >>> y_pred = np.array([1.0, 3.0, 2.0, 4.0, 0.8])
+ >>> pairwise_logistic_loss = keras_rs.losses.PairwiseLogisticLoss()
+ >>> pairwise_logistic_loss(y_true=y_true, y_pred=y_pred)
+ 1.70708
+
+ 2.2 Batched inputs
+ 2.2.1 Using default 'auto'/'sum_over_batch_size' reduction.
+ >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
+ >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+ >>> pairwise_logistic_loss = keras_rs.losses.PairwiseLogisticLoss()
+ >>> pairwise_logistic_loss(y_true=y_true, y_pred=y_pred)
+ 0.73936
+
+ 2.2.2. With masked inputs (useful for ragged inputs)
+ >>> y_true = {
+ ...     "labels": np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]]),
+ ...     "mask": np.array(
+ ...         [[True, True, True, True], [True, True, False, False]]
+ ...     ),
+ ... }
+ >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+ >>> pairwise_logistic_loss(y_true=y_true, y_pred=y_pred)
+ 0.53751
+
+ 2.2.3 With `sample_weight`
+ >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
+ >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+ >>> sample_weight = np.array(
+ ...     [[2.0, 3.0, 1.0, 1.0], [2.0, 1.0, 0.0, 0.0]]
+ ... )
+ >>> pairwise_logistic_loss = keras_rs.losses.PairwiseLogisticLoss()
+ >>> pairwise_logistic_loss(
+ ...     y_true=y_true, y_pred=y_pred, sample_weight=sample_weight
+ ... )
+ 0.80337
+
+ 2.2.4 Using `'none'` reduction.
+ >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
+ >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+ >>> pairwise_logistic_loss = keras_rs.losses.PairwiseLogisticLoss(
+ ...     reduction="none"
+ ... )
+ >>> pairwise_logistic_loss(y_true=y_true, y_pred=y_pred)
+ [[2.126928, 0., 1.3132616, 0.48877698], [0., 0.20000005, 0.79999995, 0.]]
+ """
+
  PairwiseLogisticLoss.__doc__ = pairwise_loss_subclass_doc_string.format(
      loss_name="logistic loss",
      formula=formula,
      explanation=explanation,
      extra_args=extra_args,
+     example=example,
  )
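The same style of check confirms the corrected doctest outputs above; the hinge is simply replaced by `log(1 + exp(-(s_i - s_j)))`:

```python
import numpy as np

y_true = np.array([1.0, 0.0, 1.0, 3.0, 2.0])
y_pred = np.array([1.0, 3.0, 2.0, 4.0, 0.8])

valid = y_true[:, None] > y_true[None, :]
margins = y_pred[:, None] - y_pred[None, :]
pair_loss = np.where(valid, np.log1p(np.exp(-margins)), 0.0)

print(pair_loss.sum(axis=1).mean())   # ~1.70708, matching the value above
```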
keras_rs/src/losses/pairwise_loss.py CHANGED
@@ -154,5 +154,7 @@ pairwise_loss_subclass_doc_string = (
          `"float32"` unless set to different value
          (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
          provided, then the `compute_dtype` will be utilized.
-     """
+
+     Examples:
+     {example}"""
  )
keras_rs/src/losses/pairwise_mean_squared_error.py CHANGED
@@ -64,9 +64,69 @@ explanation = """
      predicted order of items relative to their true order.
  """
  extra_args = ""
+ example = """
+ 1. With `compile()` API:
+
+ ```python
+ model.compile(
+     loss=keras_rs.losses.PairwiseMeanSquaredError(),
+     ...
+ )
+ ```
+
+ 2. As a standalone function:
+ 2.1. Unbatched inputs
+ >>> y_true = np.array([1.0, 0.0, 1.0, 3.0, 2.0])
+ >>> y_pred = np.array([1.0, 3.0, 2.0, 4.0, 0.8])
+ >>> pairwise_mse = keras_rs.losses.PairwiseMeanSquaredError()
+ >>> pairwise_mse(y_true=y_true, y_pred=y_pred)
+ 19.10400
+
+ 2.2 Batched inputs
+ 2.2.1 Using default 'auto'/'sum_over_batch_size' reduction.
+ >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
+ >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+ >>> pairwise_mse = keras_rs.losses.PairwiseMeanSquaredError()
+ >>> pairwise_mse(y_true=y_true, y_pred=y_pred)
+ 5.57999
+
+ 2.2.2. With masked inputs (useful for ragged inputs)
+ >>> y_true = {
+ ...     "labels": np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]]),
+ ...     "mask": np.array(
+ ...         [[True, True, True, True], [True, True, False, False]]
+ ...     ),
+ ... }
+ >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+ >>> pairwise_mse(y_true=y_true, y_pred=y_pred)
+ 4.76000
+
+ 2.2.3 With `sample_weight`
+ >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
+ >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+ >>> sample_weight = np.array(
+ ...     [[2.0, 3.0, 1.0, 1.0], [2.0, 1.0, 0.0, 0.0]]
+ ... )
+ >>> pairwise_mse = keras_rs.losses.PairwiseMeanSquaredError()
+ >>> pairwise_mse(
+ ...     y_true=y_true, y_pred=y_pred, sample_weight=sample_weight
+ ... )
+ 11.0500
+
+ 2.2.4 Using `'none'` reduction.
+ >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
+ >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+ >>> pairwise_mse = keras_rs.losses.PairwiseMeanSquaredError(
+ ...     reduction="none"
+ ... )
+ >>> pairwise_mse(y_true=y_true, y_pred=y_pred)
+ [[11., 17., 5., 5.], [2.04, 1.3199998, 1.6399999, 1.6399999]]
+ """
+
  PairwiseMeanSquaredError.__doc__ = pairwise_loss_subclass_doc_string.format(
      loss_name="mean squared error",
      formula=formula,
      explanation=explanation,
      extra_args=extra_args,
+     example=example,
  )
keras_rs/src/losses/pairwise_soft_zero_one_loss.py CHANGED
@@ -24,9 +24,72 @@ explanation = """
      suitable for gradient-based optimization.
  """
  extra_args = ""
+ example = """
+ 1. With `compile()` API:
+
+ ```python
+ model.compile(
+     loss=keras_rs.losses.PairwiseSoftZeroOneLoss(),
+     ...
+ )
+ ```
+
+ 2. As a standalone function:
+ 2.1. Unbatched inputs
+ >>> y_true = np.array([1.0, 0.0, 1.0, 3.0, 2.0])
+ >>> y_pred = np.array([1.0, 3.0, 2.0, 4.0, 0.8])
+ >>> pairwise_soft_zero_one_loss = keras_rs.losses.PairwiseSoftZeroOneLoss()
+ >>> pairwise_soft_zero_one_loss(y_true=y_true, y_pred=y_pred)
+ 0.86103
+
+ 2.2 Batched inputs
+ 2.2.1 Using default 'auto'/'sum_over_batch_size' reduction.
+ >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
+ >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+ >>> pairwise_soft_zero_one_loss = keras_rs.losses.PairwiseSoftZeroOneLoss()
+ >>> pairwise_soft_zero_one_loss(y_true=y_true, y_pred=y_pred)
+ 0.46202
+
+ 2.2.2. With masked inputs (useful for ragged inputs)
+ >>> y_true = {
+ ...     "labels": np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]]),
+ ...     "mask": np.array(
+ ...         [[True, True, True, True], [True, True, False, False]]
+ ...     ),
+ ... }
+ >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+ >>> pairwise_soft_zero_one_loss(y_true=y_true, y_pred=y_pred)
+ 0.29468
+
+ 2.2.3 With `sample_weight`
+ >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
+ >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+ >>> sample_weight = np.array(
+ ...     [[2.0, 3.0, 1.0, 1.0], [2.0, 1.0, 0.0, 0.0]]
+ ... )
+ >>> pairwise_soft_zero_one_loss = keras_rs.losses.PairwiseSoftZeroOneLoss()
+ >>> pairwise_soft_zero_one_loss(
+ ...     y_true=y_true, y_pred=y_pred, sample_weight=sample_weight
+ ... )
+ 0.40478
+
+ 2.2.4 Using `'none'` reduction.
+ >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
+ >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+ >>> pairwise_soft_zero_one_loss = keras_rs.losses.PairwiseSoftZeroOneLoss(
+ ...     reduction="none"
+ ... )
+ >>> pairwise_soft_zero_one_loss(y_true=y_true, y_pred=y_pred)
+ [
+     [0.8807971 , 0., 0.73105854, 0.43557024],
+     [0., 0.31002545, 0.7191075 , 0.61961967]
+ ]
+ """
+
  PairwiseSoftZeroOneLoss.__doc__ = pairwise_loss_subclass_doc_string.format(
      loss_name="soft zero-one loss",
      formula=formula,
      explanation=explanation,
      extra_args=extra_args,
+     example=example,
  )
keras_rs/src/metrics/dcg.py CHANGED
@@ -11,7 +11,7 @@ from keras_rs.src.metrics.ranking_metric import (
      ranking_metric_subclass_doc_string,
  )
  from keras_rs.src.metrics.ranking_metric import (
-     ranking_metric_subclass_doc_string_args,
+     ranking_metric_subclass_doc_string_post_desc,
  )
  from keras_rs.src.metrics.ranking_metrics_utils import compute_dcg
  from keras_rs.src.metrics.ranking_metrics_utils import default_gain_fn
@@ -127,6 +127,25 @@ extra_args = """
      rank_discount_fn: function. Maps rank positions to discount
          values. The default (`default_rank_discount_fn`) implements
          `1 / log2(rank + 1)`."""
+ example = """
+ >>> batch_size = 2
+ >>> list_size = 5
+ >>> labels = np.random.randint(0, 3, size=(batch_size, list_size))
+ >>> scores = np.random.random(size=(batch_size, list_size))
+ >>> metric = keras_rs.metrics.DCG()(
+ ...     y_true=labels, y_pred=scores
+ ... )
+
+ # Mask certain elements (can be used for uneven inputs)
+ >>> batch_size = 2
+ >>> list_size = 5
+ >>> labels = np.random.randint(0, 3, size=(batch_size, list_size))
+ >>> scores = np.random.random(size=(batch_size, list_size))
+ >>> mask = np.random.randint(0, 2, size=(batch_size, list_size), dtype=bool)
+ >>> metric = keras_rs.metrics.DCG()(
+ ...     y_true={"labels": labels, "mask": mask}, y_pred=scores
+ ... )
+ """
 
  DCG.__doc__ = format_docstring(
      ranking_metric_subclass_doc_string,
@@ -137,4 +156,6 @@ DCG.__doc__ = format_docstring(
      relevance_type=relevance_type,
      score_range_interpretation=score_range_interpretation,
      formula=formula,
- ) + ranking_metric_subclass_doc_string_args.format(extra_args=extra_args)
+ ) + ranking_metric_subclass_doc_string_post_desc.format(
+     extra_args=extra_args, example=example
+ )
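A worked DCG computation may be more informative than the random-data example. This sketch uses the `1 / log2(rank + 1)` discount quoted above and assumes the common exponential gain `2**y - 1` for `default_gain_fn` (the gain function is an assumption here, not stated in the diff):

```python
import numpy as np

labels = np.array([3.0, 1.0, 2.0])      # graded relevance, in predicted order
ranks = np.arange(1, labels.size + 1)   # 1-based positions

gains = 2.0 ** labels - 1.0             # assumed default gain: 2^y - 1
discounts = 1.0 / np.log2(ranks + 1)    # discount quoted in the docstring
print(np.sum(gains * discounts))        # 7*1 + 1*0.631 + 3*0.5 ≈ 9.13
```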
keras_rs/src/metrics/mean_average_precision.py CHANGED
@@ -7,7 +7,7 @@ from keras_rs.src.metrics.ranking_metric import (
      ranking_metric_subclass_doc_string,
  )
  from keras_rs.src.metrics.ranking_metric import (
-     ranking_metric_subclass_doc_string_args,
+     ranking_metric_subclass_doc_string_post_desc,
  )
  from keras_rs.src.metrics.ranking_metrics_utils import get_list_weights
  from keras_rs.src.metrics.ranking_metrics_utils import sort_by_scores
@@ -99,6 +99,25 @@ where:
      original list `y`.
  """
  extra_args = ""
+ example = """
+ >>> batch_size = 2
+ >>> list_size = 5
+ >>> labels = np.random.randint(0, 2, size=(batch_size, list_size))
+ >>> scores = np.random.random(size=(batch_size, list_size))
+ >>> metric = keras_rs.metrics.MeanAveragePrecision()(
+ ...     y_true=labels, y_pred=scores
+ ... )
+
+ # Mask certain elements (can be used for uneven inputs)
+ >>> batch_size = 2
+ >>> list_size = 5
+ >>> labels = np.random.randint(0, 2, size=(batch_size, list_size))
+ >>> scores = np.random.random(size=(batch_size, list_size))
+ >>> mask = np.random.randint(0, 2, size=(batch_size, list_size), dtype=bool)
+ >>> metric = keras_rs.metrics.MeanAveragePrecision()(
+ ...     y_true={"labels": labels, "mask": mask}, y_pred=scores
+ ... )
+ """
 
  MeanAveragePrecision.__doc__ = format_docstring(
      ranking_metric_subclass_doc_string,
@@ -109,4 +128,6 @@ MeanAveragePrecision.__doc__ = format_docstring(
      relevance_type=relevance_type,
      score_range_interpretation=score_range_interpretation,
      formula=formula,
- ) + ranking_metric_subclass_doc_string_args.format(extra_args=extra_args)
+ ) + ranking_metric_subclass_doc_string_post_desc.format(
+     extra_args=extra_args, example=example
+ )
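For intuition, average precision for a single ranked list reduces to averaging the cumulative precision at each relevant item; a small worked sketch (illustrative, not the library code):

```python
import numpy as np

relevance = np.array([1, 0, 1, 0, 0])   # binary labels, already ordered by score
ranks = np.arange(1, relevance.size + 1)

precision_at_rank = np.cumsum(relevance) / ranks
average_precision = (precision_at_rank * relevance).sum() / relevance.sum()
print(average_precision)                # (1/1 + 2/3) / 2 ≈ 0.8333
```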
keras_rs/src/metrics/mean_reciprocal_rank.py CHANGED
@@ -7,7 +7,7 @@ from keras_rs.src.metrics.ranking_metric import (
      ranking_metric_subclass_doc_string,
  )
  from keras_rs.src.metrics.ranking_metric import (
-     ranking_metric_subclass_doc_string_args,
+     ranking_metric_subclass_doc_string_post_desc,
  )
  from keras_rs.src.metrics.ranking_metrics_utils import get_list_weights
  from keras_rs.src.metrics.ranking_metrics_utils import sort_by_scores
@@ -86,6 +86,26 @@ formula = """```
  MRR(y, s) = max_{i} y_{i} / rank(s_{i})
  ```"""
  extra_args = ""
+ example = """
+ >>> batch_size = 2
+ >>> list_size = 5
+ >>> labels = np.random.randint(0, 2, size=(batch_size, list_size))
+ >>> scores = np.random.random(size=(batch_size, list_size))
+ >>> metric = keras_rs.metrics.MeanReciprocalRank()(
+ ...     y_true=labels, y_pred=scores
+ ... )
+
+ # Mask certain elements (can be used for uneven inputs)
+ >>> batch_size = 2
+ >>> list_size = 5
+ >>> labels = np.random.randint(0, 2, size=(batch_size, list_size))
+ >>> scores = np.random.random(size=(batch_size, list_size))
+ >>> mask = np.random.randint(0, 2, size=(batch_size, list_size), dtype=bool)
+ >>> metric = keras_rs.metrics.MeanReciprocalRank()(
+ ...     y_true={"labels": labels, "mask": mask}, y_pred=scores
+ ... )
+ """
+
  MeanReciprocalRank.__doc__ = format_docstring(
      ranking_metric_subclass_doc_string,
      width=80,
@@ -95,4 +115,6 @@ MeanReciprocalRank.__doc__ = format_docstring(
      relevance_type=relevance_type,
      score_range_interpretation=score_range_interpretation,
      formula=formula,
- ) + ranking_metric_subclass_doc_string_args.format(extra_args=extra_args)
+ ) + ranking_metric_subclass_doc_string_post_desc.format(
+     extra_args=extra_args, example=example
+ )
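The formula `MRR(y, s) = max_i y_i / rank(s_i)` is easy to trace on a concrete list:

```python
import numpy as np

labels = np.array([0.0, 1.0, 0.0, 0.0])   # one relevant item
scores = np.array([0.9, 0.8, 0.3, 0.1])

order = np.argsort(-scores)               # items from best to worst score
ranks = np.empty_like(order)
ranks[order] = np.arange(1, scores.size + 1)   # 1-based rank of each item

print(np.max(labels / ranks))             # relevant item ranks 2nd -> 0.5
```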
keras_rs/src/metrics/ndcg.py CHANGED
@@ -11,7 +11,7 @@ from keras_rs.src.metrics.ranking_metric import (
      ranking_metric_subclass_doc_string,
  )
  from keras_rs.src.metrics.ranking_metric import (
-     ranking_metric_subclass_doc_string_args,
+     ranking_metric_subclass_doc_string_post_desc,
  )
  from keras_rs.src.metrics.ranking_metrics_utils import compute_dcg
  from keras_rs.src.metrics.ranking_metrics_utils import default_gain_fn
@@ -170,6 +170,25 @@ rank_discount_fn: callable. Maps rank positions (1-based) to discount
      values. The default (`default_rank_discount_fn`) typically implements
      `1 / log2(rank + 1)`. Used for both DCG and IDCG.
  """
+ example = """
+ >>> batch_size = 2
+ >>> list_size = 5
+ >>> labels = np.random.randint(0, 3, size=(batch_size, list_size))
+ >>> scores = np.random.random(size=(batch_size, list_size))
+ >>> metric = keras_rs.metrics.NDCG()(
+ ...     y_true=labels, y_pred=scores
+ ... )
+
+ # Mask certain elements (can be used for uneven inputs)
+ >>> batch_size = 2
+ >>> list_size = 5
+ >>> labels = np.random.randint(0, 3, size=(batch_size, list_size))
+ >>> scores = np.random.random(size=(batch_size, list_size))
+ >>> mask = np.random.randint(0, 2, size=(batch_size, list_size), dtype=bool)
+ >>> metric = keras_rs.metrics.NDCG()(
+ ...     y_true={"labels": labels, "mask": mask}, y_pred=scores
+ ... )
+ """
 
  NDCG.__doc__ = format_docstring(
      ranking_metric_subclass_doc_string,
@@ -181,4 +200,6 @@ NDCG.__doc__ = format_docstring(
      score_range_interpretation=score_range_interpretation,
      formula=formula,
      extra_args=extra_args,
- ) + ranking_metric_subclass_doc_string_args.format(extra_args=extra_args)
+ ) + ranking_metric_subclass_doc_string_post_desc.format(
+     extra_args=extra_args, example=example
+ )
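nDCG then divides the DCG of the predicted ordering by the DCG of the ideal ordering; continuing the earlier DCG sketch (same assumed gain, same quoted discount):

```python
import numpy as np

labels = np.array([3.0, 1.0, 2.0])      # relevance in predicted order
ranks = np.arange(1, labels.size + 1)
gain = lambda y: 2.0 ** y - 1.0         # assumed default gain
discount = 1.0 / np.log2(ranks + 1)

dcg = np.sum(gain(labels) * discount)
idcg = np.sum(gain(np.sort(labels)[::-1]) * discount)   # ideal ordering
print(dcg / idcg)                       # ≈ 0.972, always in [0, 1]
```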
keras_rs/src/metrics/precision_at_k.py CHANGED
@@ -7,7 +7,7 @@ from keras_rs.src.metrics.ranking_metric import (
      ranking_metric_subclass_doc_string,
  )
  from keras_rs.src.metrics.ranking_metric import (
-     ranking_metric_subclass_doc_string_args,
+     ranking_metric_subclass_doc_string_post_desc,
  )
  from keras_rs.src.metrics.ranking_metrics_utils import get_list_weights
  from keras_rs.src.metrics.ranking_metrics_utils import sort_by_scores
@@ -82,6 +82,26 @@ P@k(y, s) = 1/k sum_i I[rank(s_i) < k] y_i
  where `y_i` is the relevance label (0/1) of the item ranked at position
  `i`, and `I[condition]` is 1 if the condition is met, otherwise 0."""
  extra_args = ""
+ example = """
+ >>> batch_size = 2
+ >>> list_size = 5
+ >>> labels = np.random.randint(0, 2, size=(batch_size, list_size))
+ >>> scores = np.random.random(size=(batch_size, list_size))
+ >>> metric = keras_rs.metrics.PrecisionAtK()(
+ ...     y_true=labels, y_pred=scores
+ ... )
+
+ # Mask certain elements (can be used for uneven inputs)
+ >>> batch_size = 2
+ >>> list_size = 5
+ >>> labels = np.random.randint(0, 2, size=(batch_size, list_size))
+ >>> scores = np.random.random(size=(batch_size, list_size))
+ >>> mask = np.random.randint(0, 2, size=(batch_size, list_size), dtype=bool)
+ >>> metric = keras_rs.metrics.PrecisionAtK()(
+ ...     y_true={"labels": labels, "mask": mask}, y_pred=scores
+ ... )
+ """
+
  PrecisionAtK.__doc__ = format_docstring(
      ranking_metric_subclass_doc_string,
      width=80,
@@ -91,4 +111,6 @@ PrecisionAtK.__doc__ = format_docstring(
      relevance_type=relevance_type,
      score_range_interpretation=score_range_interpretation,
      formula=formula,
- ) + ranking_metric_subclass_doc_string_args.format(extra_args=extra_args)
+ ) + ranking_metric_subclass_doc_string_post_desc.format(
+     extra_args=extra_args, example=example
+ )
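The P@k formula above is straightforward to evaluate by hand; this sketch reads `rank(s_i)` as 0-based so that `rank < k` selects the top k items (an interpretation, since the docstring does not spell it out):

```python
import numpy as np

k = 2
labels = np.array([1, 0, 1, 0])          # binary relevance
scores = np.array([0.2, 0.9, 0.7, 0.1])

order = np.argsort(-scores)
ranks = np.empty_like(order)
ranks[order] = np.arange(scores.size)    # 0-based rank of each item

print(labels[ranks < k].sum() / k)       # 1 of the top 2 is relevant -> 0.5
```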
keras_rs/src/metrics/ranking_metric.py CHANGED
@@ -235,7 +235,7 @@ to get 1D weights. For more details, refer to
  `keras_rs.src.metrics.ranking_metrics_utils.get_list_weights`.
  """
 
- ranking_metric_subclass_doc_string_args = """
+ ranking_metric_subclass_doc_string_post_desc = """
 
  Args:{extra_args}
      k: int. The number of top-ranked items to consider (the 'k' in 'top-k').
@@ -249,4 +249,7 @@ ranking_metric_subclass_doc_string_args = """
          `"float32"` unless set to different value
          (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
          provided, then the `compute_dtype` will be utilized.
+
+ Example:
+ {example}
  """
keras_rs/src/metrics/recall_at_k.py CHANGED
@@ -7,7 +7,7 @@ from keras_rs.src.metrics.ranking_metric import (
      ranking_metric_subclass_doc_string,
  )
  from keras_rs.src.metrics.ranking_metric import (
-     ranking_metric_subclass_doc_string_args,
+     ranking_metric_subclass_doc_string_post_desc,
  )
  from keras_rs.src.metrics.ranking_metrics_utils import get_list_weights
  from keras_rs.src.metrics.ranking_metrics_utils import sort_by_scores
@@ -73,6 +73,26 @@ R@k(y, s) = sum_i I[rank(s_i) < k] y_i / sum_j y_j
  where `y_i` is the relevance label (0/1) of the item ranked at position
  `i`, `I[condition]` is 1 if the condition is met, otherwise 0."""
  extra_args = ""
+ example = """
+ >>> batch_size = 2
+ >>> list_size = 5
+ >>> labels = np.random.randint(0, 2, size=(batch_size, list_size))
+ >>> scores = np.random.random(size=(batch_size, list_size))
+ >>> metric = keras_rs.metrics.RecallAtK()(
+ ...     y_true=labels, y_pred=scores
+ ... )
+
+ # Mask certain elements (can be used for uneven inputs)
+ >>> batch_size = 2
+ >>> list_size = 5
+ >>> labels = np.random.randint(0, 2, size=(batch_size, list_size))
+ >>> scores = np.random.random(size=(batch_size, list_size))
+ >>> mask = np.random.randint(0, 2, size=(batch_size, list_size), dtype=bool)
+ >>> metric = keras_rs.metrics.RecallAtK()(
+ ...     y_true={"labels": labels, "mask": mask}, y_pred=scores
+ ... )
+ """
+
  RecallAtK.__doc__ = format_docstring(
      ranking_metric_subclass_doc_string,
      width=80,
@@ -82,4 +102,6 @@ RecallAtK.__doc__ = format_docstring(
      relevance_type=relevance_type,
      score_range_interpretation=score_range_interpretation,
      formula=formula,
- ) + ranking_metric_subclass_doc_string_args.format(extra_args=extra_args)
+ ) + ranking_metric_subclass_doc_string_post_desc.format(
+     extra_args=extra_args, example=example
+ )
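Recall@k differs from the P@k sketch above only in the denominator, which counts all relevant items rather than k:

```python
import numpy as np

k = 2
labels = np.array([1, 0, 1, 0])
scores = np.array([0.2, 0.9, 0.7, 0.1])

order = np.argsort(-scores)
ranks = np.empty_like(order)
ranks[order] = np.arange(scores.size)

print(labels[ranks < k].sum() / labels.sum())   # 1 of 2 relevant found -> 0.5
```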
keras_rs/src/version.py CHANGED
@@ -1,7 +1,7 @@
  from keras_rs.src.api_export import keras_rs_export
 
  # Unique source of truth for the version number.
- __version__ = "0.0.1.dev2025042803"
+ __version__ = "0.0.1.dev2025042903"
 
 
  @keras_rs_export("keras_rs.version")
keras_rs_nightly-0.0.1.dev2025042903.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: keras-rs-nightly
- Version: 0.0.1.dev2025042803
+ Version: 0.0.1.dev2025042903
  Summary: Multi-backend recommender systems with Keras 3.
  Author-email: Keras team <keras-users@googlegroups.com>
  License: Apache License 2.0
@@ -36,6 +36,80 @@ This library is an extension of the core Keras API; all high-level modules
  receive that same level of polish as core Keras. If you are familiar with Keras,
  congratulations! You already understand most of Keras Recommenders.
 
+ ## Quick Links
+
+ - [Home page](https://keras.io/keras_rs)
+ - [Examples](https://keras.io/keras_rs/examples)
+ - [API documentation](https://keras.io/keras_rs/api)
+
+ ## Quickstart
+
+ ### Train your own cross network
+
+ Choose a backend:
+
+ ```python
+ import os
+ os.environ["KERAS_BACKEND"] = "jax"  # Or "tensorflow" or "torch"!
+ ```
+
+ Import KerasRS and other libraries:
+
+ ```python
+ import keras
+ import keras_rs
+ import numpy as np
+ ```
+
+ Define a simple model using the `FeatureCross` layer:
+
+ ```python
+ vocabulary_size = 32
+ embedding_dim = 6
+
+ inputs = keras.Input(shape=(), name='indices', dtype="int32")
+ x0 = keras.layers.Embedding(
+     input_dim=vocabulary_size,
+     output_dim=embedding_dim
+ )(inputs)
+ x1 = keras_rs.layers.FeatureCross()(x0, x0)
+ x2 = keras_rs.layers.FeatureCross()(x0, x1)
+ output = keras.layers.Dense(units=10)(x2)
+ model = keras.Model(inputs, output)
+ ```
+
+ Compile the model:
+
+ ```python
+ model.compile(
+     loss=keras.losses.MeanSquaredError(),
+     optimizer=keras.optimizers.Adam(learning_rate=3e-4)
+ )
+ ```
+
+ Call `model.fit()` on dummy data:
+
+ ```python
+ batch_size = 2
+ x = np.random.randint(0, vocabulary_size, size=(batch_size,))
+ y = np.random.random(size=(batch_size,))
+ model.fit(x, y=y)
+ ```
+
+ ### Use ranking losses and metrics
+
+ If your task is to rank items in a list, you can make use of the ranking losses
+ and metrics which KerasRS provides. Below, we use the pairwise hinge loss and
+ track the nDCG metric:
+
+ ```python
+ model.compile(
+     loss=keras_rs.losses.PairwiseHingeLoss(),
+     metrics=[keras_rs.metrics.NDCG()],
+     optimizer=keras.optimizers.Adam(learning_rate=3e-4),
+ )
+ ```
+
  ## Installation
 
  Keras Recommenders is available on PyPI as `keras-rs`:
keras_rs_nightly-0.0.1.dev2025042903.dist-info/RECORD CHANGED
@@ -5,38 +5,38 @@ keras_rs/metrics/__init__.py,sha256=Qxpf6OFooIL9TIn2l3WgOea3HFRG0hq02glPAxtMZ9c,
  keras_rs/src/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  keras_rs/src/api_export.py,sha256=RsmG-DvO-cdFeAF9W6LRzms0kvtm-Yp9BAA_d-952zI,510
  keras_rs/src/types.py,sha256=UyOdgjqrqg_b58opnY8n6gTiDHKVR8z_bmEruehERBk,514
- keras_rs/src/version.py,sha256=7yE4X2uVxePL-l4daVZqTqrjFfoFLjBVYZA4sczaeoM,222
+ keras_rs/src/version.py,sha256=hcIfcFxzcSIAxV3xPiLBwz0zpbWX7F3lHDOK6d9yI7s,222
  keras_rs/src/layers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  keras_rs/src/layers/feature_interaction/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- keras_rs/src/layers/feature_interaction/dot_interaction.py,sha256=jGHcg0EiWxth6LTxG2yWgHcyx_GXrxvA61uQqpPfnDQ,6900
- keras_rs/src/layers/feature_interaction/feature_cross.py,sha256=5OCSI0vFYzJNmgkKcuHIbVv8U2q3UvS80-qZjPimDjM,8155
+ keras_rs/src/layers/feature_interaction/dot_interaction.py,sha256=bRLz03_8VaYLNG4gbIKCzsSc26shKMmzmwCs8SujezE,8542
+ keras_rs/src/layers/feature_interaction/feature_cross.py,sha256=rViVlJOGYG2f-uKTDQH7MdX2syRzIMkYYtAQUjz6F-0,8755
  keras_rs/src/layers/retrieval/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  keras_rs/src/layers/retrieval/brute_force_retrieval.py,sha256=izdppBXxJH0KqYEg7Zsr-SL-SHgAmnFopXMPalEO3uw,5676
- keras_rs/src/layers/retrieval/hard_negative_mining.py,sha256=IWFrbw1h9z3AUw4oUBKf5_Aud4MTHO_AKdHfoyFa5As,3031
- keras_rs/src/layers/retrieval/remove_accidental_hits.py,sha256=Z84z2YgKspKeNdc5id8lf9TAyFsbCCz3acJxiKXYipc,3324
+ keras_rs/src/layers/retrieval/hard_negative_mining.py,sha256=FLcN_lPJrwuYd8k22qUdaZQNAJ0t5zRwNvmaITiDnzA,3582
+ keras_rs/src/layers/retrieval/remove_accidental_hits.py,sha256=uMul2tkI3hjnjIYUp4Kwl6tI6OmJSjAiHg2m40v8eKo,3781
  keras_rs/src/layers/retrieval/retrieval.py,sha256=hVOBF10SF2q_TgJdVUqztbnw5qQF-cxVRGdJbOKoL9M,4191
- keras_rs/src/layers/retrieval/sampling_probability_correction.py,sha256=80vgOPfBiF-PC0dSyqS57IcIxOxi_Q_R7eSXHn1G0yI,1437
+ keras_rs/src/layers/retrieval/sampling_probability_correction.py,sha256=YX93TfqkckJiZB7gQYyMWQMx83UNHTedudrNyNdut0c,1965
  keras_rs/src/losses/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- keras_rs/src/losses/pairwise_hinge_loss.py,sha256=nrIU0d1IcCAGo7RVxNitkldJhY2ZrXxjTV7Po27FXds,950
- keras_rs/src/losses/pairwise_logistic_loss.py,sha256=2dTtRmrNfvF_lOvHK0UQ518L2d4fkvQZDj30HWB5A2s,1305
- keras_rs/src/losses/pairwise_loss.py,sha256=rmDr_Qc3yA0CR8rUCCGjOgdbjYfC505BLNuITyb1n8k,6132
+ keras_rs/src/losses/pairwise_hinge_loss.py,sha256=akzcNVvNgXiNCDs7amlhHS8ezrMed_hHo_YaWZXsC_c,3086
+ keras_rs/src/losses/pairwise_logistic_loss.py,sha256=Pej0PFLZGyGaO-li7Rhm8n8xkQ5ZMz1bzJmQ7HE9w48,3512
+ keras_rs/src/losses/pairwise_loss.py,sha256=CxZpFrByHsq6wjP7WeplRJO9LV8_X17JesOitdysSig,6152
  keras_rs/src/losses/pairwise_loss_utils.py,sha256=xvdGvdKNkvGvIaWYEQziWTFNa5EJz7rdkVGgrsnDHUk,1246
- keras_rs/src/losses/pairwise_mean_squared_error.py,sha256=KhSRvjg4RpwhASP1Sl7PZoq2488P_uGDr9tZWzZhDVU,2764
- keras_rs/src/losses/pairwise_soft_zero_one_loss.py,sha256=QdWn-lyWQM-U9ID9xGQ7oK10q9XT6qd1gxVAKy8hZH4,1239
+ keras_rs/src/losses/pairwise_mean_squared_error.py,sha256=rtflbaxvOU1ctcK1MGjQek5dbeH0ewPRNxAqVMvyskw,4892
+ keras_rs/src/losses/pairwise_soft_zero_one_loss.py,sha256=lXzxRrElteDXpwZKUf60w7iHaMuNl70Fp0xSXFrtxUo,3531
  keras_rs/src/metrics/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- keras_rs/src/metrics/dcg.py,sha256=DzSBc9ZbgNavuHRt3wtVzdx4ouAaaqeYhd9NxQLPq0g,5120
- keras_rs/src/metrics/mean_average_precision.py,sha256=SF5NlhlyVL9L_YVkj_s_135f3-8hILVHRziSGafGyZI,3915
- keras_rs/src/metrics/mean_reciprocal_rank.py,sha256=4stq0MzyWNokMlol6BESDAMuoUFieDrFFc57ue94h4Y,3240
- keras_rs/src/metrics/ndcg.py,sha256=G7WNFoUaOhnf4vMF1jgcI4yGxieUfJv5E0upv4Qs1AQ,6545
- keras_rs/src/metrics/precision_at_k.py,sha256=u-mj49qamt448gxkOI9YIZMMrhgO8QmetRFXGGlWOqY,3247
- keras_rs/src/metrics/ranking_metric.py,sha256=cdFb4Lg2Z8P-02ImMGUAX4XeOUyzEE8TA6nB4fDgq0U,10411
+ keras_rs/src/metrics/dcg.py,sha256=595DyAehV4yF4TeWPo4y4bYG4zzEgvAlGAtupqyPM6A,5883
+ keras_rs/src/metrics/mean_average_precision.py,sha256=9fT_Kvm5XaDH6j3d5Yg_ubz6wgRmWRFnwZmoGwEUzLg,4712
+ keras_rs/src/metrics/mean_reciprocal_rank.py,sha256=-zcFcbcJeopiMj0ZLyzg3heOa6zpZEQ9p0we8Oj87LA,4034
+ keras_rs/src/metrics/ndcg.py,sha256=bKd3h-xoAmHSBx8xJdF8eJR3r1U3LpI6JCML3r1BDA8,7310
+ keras_rs/src/metrics/precision_at_k.py,sha256=1xHCNsZXo0VHDLrl1sRYF-zMLzSNoZaqwys5Ly9J9qI,4029
+ keras_rs/src/metrics/ranking_metric.py,sha256=rzcVxQFLbcunATo9-L_RlZe0RLDTi5T9eaSdzKILHLw,10440
  keras_rs/src/metrics/ranking_metrics_utils.py,sha256=989J8pr6FRsA1HwBeF7SA8uQqjZT2XeCxKfRuMysWnQ,8828
- keras_rs/src/metrics/recall_at_k.py,sha256=hlPnR5AtFjdd5AG0zLkLGVyLO5mWtp2bAu_cSOq9Fws,2919
+ keras_rs/src/metrics/recall_at_k.py,sha256=aL6Mxu16XSxoZ0lFmESZ3b0xC6Ga_sjd7M4Q0o1b5hs,3695
  keras_rs/src/metrics/utils.py,sha256=6xanTNdwARn4ugzmb7ko2kwAhNhsnR4NhrpS_qW0IKc,2506
  keras_rs/src/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  keras_rs/src/utils/doc_string_utils.py,sha256=yVyQ8pYdl4gd4tKRhD8dXmQX1EwZeLiV3cCq3A1tUEk,1466
  keras_rs/src/utils/keras_utils.py,sha256=d28OdQP4GrJk4NIQS4n0KPtCbgOCxVU_vDnnI7ODpOw,1562
- keras_rs_nightly-0.0.1.dev2025042803.dist-info/METADATA,sha256=4W-DkQ0hKfcBFI6CAJzbJWAiNRGhLGFYrxLyGVo8GBM,3614
- keras_rs_nightly-0.0.1.dev2025042803.dist-info/WHEEL,sha256=ck4Vq1_RXyvS4Jt6SI0Vz6fyVs4GWg7AINwpsaGEgPE,91
- keras_rs_nightly-0.0.1.dev2025042803.dist-info/top_level.txt,sha256=pWs8X78Z0cn6lfcIb9VYOW5UeJ-TpoaO9dByzo7_FFo,9
- keras_rs_nightly-0.0.1.dev2025042803.dist-info/RECORD,,
+ keras_rs_nightly-0.0.1.dev2025042903.dist-info/METADATA,sha256=ZGbBcnmbci7Jh1FroUPNSS4-Nvq4utJny4yWp5zCOac,5208
+ keras_rs_nightly-0.0.1.dev2025042903.dist-info/WHEEL,sha256=ck4Vq1_RXyvS4Jt6SI0Vz6fyVs4GWg7AINwpsaGEgPE,91
+ keras_rs_nightly-0.0.1.dev2025042903.dist-info/top_level.txt,sha256=pWs8X78Z0cn6lfcIb9VYOW5UeJ-TpoaO9dByzo7_FFo,9
+ keras_rs_nightly-0.0.1.dev2025042903.dist-info/RECORD,,