nextrec 0.4.15__py3-none-any.whl → 0.4.17__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
nextrec/__version__.py CHANGED
@@ -1 +1 @@
1
- __version__ = "0.4.15"
1
+ __version__ = "0.4.17"
nextrec/basic/heads.py ADDED
@@ -0,0 +1,101 @@
1
+ """
2
+ Task head implementations for NextRec models.
3
+
4
+ Date: create on 23/12/2025
5
+ Author: Yang Zhou, zyaztec@gmail.com
6
+ """
7
+
8
+ from __future__ import annotations
9
+
10
+ from typing import Literal
11
+
12
+ import torch
13
+ import torch.nn as nn
14
+ import torch.nn.functional as F
15
+
16
+ from nextrec.basic.layers import PredictionLayer
17
+
18
+
19
class TaskHead(nn.Module):
    """Unified task head for ranking/regression/multi-task outputs.

    Thin wrapper around PredictionLayer so models can depend on a "Head"
    abstraction without changing their existing forward signatures.
    """

    def __init__(
        self,
        task_type: str | list[str] = "binary",
        task_dims: int | list[int] | None = None,
        use_bias: bool = True,
        return_logits: bool = False,
    ) -> None:
        super().__init__()
        self.prediction = PredictionLayer(
            task_type=task_type,
            task_dims=task_dims,
            use_bias=use_bias,
            return_logits=return_logits,
        )
        # Mirror the wrapped layer's public attributes so callers that
        # introspect the head (task_types, dims, slices) keep working.
        for attr in ("task_types", "task_dims", "task_slices", "total_dim"):
            setattr(self, attr, getattr(self.prediction, attr))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Pure delegation: all task-specific handling lives in PredictionLayer.
        return self.prediction(x)
49
+
50
+
51
class RetrievalHead(nn.Module):
    """Retrieval head for two-tower models.

    Computes user/item similarity for pointwise training and inference;
    during pairwise/listwise training it returns the raw tower embeddings
    so the loss can perform in-batch negative sampling.
    """

    def __init__(
        self,
        similarity_metric: Literal["dot", "cosine", "euclidean"] = "dot",
        temperature: float = 1.0,
        training_mode: Literal["pointwise", "pairwise", "listwise"] = "pointwise",
        apply_sigmoid: bool = True,
    ) -> None:
        super().__init__()
        self.similarity_metric = similarity_metric
        self.temperature = temperature
        self.training_mode = training_mode
        self.apply_sigmoid = apply_sigmoid

    def forward(
        self,
        user_emb: torch.Tensor,
        item_emb: torch.Tensor,
        similarity_fn=None,
    ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]:
        # Pairwise/listwise losses need the raw embeddings (for in-batch
        # negatives), so during training we skip similarity entirely.
        if self.training and self.training_mode in ("pairwise", "listwise"):
            return user_emb, item_emb

        if similarity_fn is None:
            # Scoring one user against a candidate list:
            # [B, D] vs [B, N, D] -> broadcast the user to [B, 1, D].
            if user_emb.dim() == 2 and item_emb.dim() == 3:
                user_emb = user_emb.unsqueeze(1)

            metric = self.similarity_metric
            if metric == "dot":
                similarity = (user_emb * item_emb).sum(dim=-1)
            elif metric == "cosine":
                similarity = F.cosine_similarity(user_emb, item_emb, dim=-1)
            elif metric == "euclidean":
                # Negative squared distance: larger means more similar.
                diff = user_emb - item_emb
                similarity = -(diff * diff).sum(dim=-1)
            else:
                raise ValueError(
                    f"Unknown similarity metric: {self.similarity_metric}"
                )
        else:
            # Caller-supplied scorer takes precedence over the built-ins.
            similarity = similarity_fn(user_emb, item_emb)

        similarity = similarity / self.temperature
        if self.apply_sigmoid and self.training_mode == "pointwise":
            return torch.sigmoid(similarity)
        return similarity
nextrec/basic/metrics.py CHANGED
@@ -77,6 +77,8 @@ def check_user_id(*metric_sources: Any) -> bool:
77
77
 
78
78
  def compute_ks(y_true: np.ndarray, y_pred: np.ndarray) -> float:
79
79
  """Compute Kolmogorov-Smirnov statistic."""
80
+ y_true = np.asarray(y_true).reshape(-1)
81
+ y_pred = np.asarray(y_pred).reshape(-1)
80
82
  sorted_indices = np.argsort(y_pred)[::-1]
81
83
  y_true_sorted = y_true[sorted_indices]
82
84
 
nextrec/basic/model.py CHANGED
@@ -38,6 +38,7 @@ from nextrec.basic.features import (
38
38
  SequenceFeature,
39
39
  SparseFeature,
40
40
  )
41
+ from nextrec.basic.heads import RetrievalHead
41
42
  from nextrec.basic.loggers import TrainingLogger, colorize, format_kv, setup_logger
42
43
  from nextrec.basic.metrics import check_user_id, configure_metrics, evaluate_metrics
43
44
  from nextrec.basic.session import create_session, resolve_save_path
@@ -481,7 +482,7 @@ class BaseModel(FeatureSet, nn.Module):
481
482
  "[BaseModel-compile Error] loss_weights list must have exactly one element for single-task setup."
482
483
  )
483
484
  loss_weights = loss_weights[0]
484
- self.loss_weights = [float(loss_weights)] # type: ignore
485
+ self.loss_weights = [float(loss_weights)] # type: ignore
485
486
  else:
486
487
  if isinstance(loss_weights, (int, float)):
487
488
  weights = [float(loss_weights)] * self.nums_task
@@ -591,8 +592,8 @@ class BaseModel(FeatureSet, nn.Module):
591
592
 
592
593
  def fit(
593
594
  self,
594
- train_data = None,
595
- valid_data = None,
595
+ train_data=None,
596
+ valid_data=None,
596
597
  metrics: (
597
598
  list[str] | dict[str, list[str]] | None
598
599
  ) = None, # ['auc', 'logloss'] or {'target1': ['auc', 'logloss'], 'target2': ['mse']}
@@ -1583,8 +1584,11 @@ class BaseModel(FeatureSet, nn.Module):
1583
1584
  else:
1584
1585
  data_loader = data
1585
1586
 
1586
- if hasattr(data_loader, 'num_workers') and data_loader.num_workers > 0:
1587
- if hasattr(data_loader.dataset, '__class__') and 'Streaming' in data_loader.dataset.__class__.__name__:
1587
+ if hasattr(data_loader, "num_workers") and data_loader.num_workers > 0:
1588
+ if (
1589
+ hasattr(data_loader.dataset, "__class__")
1590
+ and "Streaming" in data_loader.dataset.__class__.__name__
1591
+ ):
1588
1592
  logging.warning(
1589
1593
  f"[Predict Streaming Warning] Detected DataLoader with num_workers={data_loader.num_workers} "
1590
1594
  "and streaming dataset. This may cause data duplication! "
@@ -2112,6 +2116,12 @@ class BaseMatchModel(BaseModel):
2112
2116
  )
2113
2117
  self.user_feature_names = {feature.name for feature in self.user_features_all}
2114
2118
  self.item_feature_names = {feature.name for feature in self.item_features_all}
2119
+ self.head = RetrievalHead(
2120
+ similarity_metric=self.similarity_metric,
2121
+ temperature=self.temperature,
2122
+ training_mode=self.training_mode,
2123
+ apply_sigmoid=True,
2124
+ )
2115
2125
 
2116
2126
  def compile(
2117
2127
  self,
@@ -2241,15 +2251,9 @@ class BaseMatchModel(BaseModel):
2241
2251
  user_emb = self.user_tower(user_input) # [B, D]
2242
2252
  item_emb = self.item_tower(item_input) # [B, D]
2243
2253
 
2244
- if self.training and self.training_mode in ["pairwise", "listwise"]:
2245
- return user_emb, item_emb
2246
-
2247
- similarity = self.compute_similarity(user_emb, item_emb) # [B]
2248
-
2249
- if self.training_mode == "pointwise":
2250
- return torch.sigmoid(similarity)
2251
- else:
2252
- return similarity
2254
+ return self.head(
2255
+ user_emb, item_emb, similarity_fn=self.compute_similarity
2256
+ )
2253
2257
 
2254
2258
  def compute_loss(self, y_pred, y_true):
2255
2259
  if self.training_mode == "pointwise":
@@ -45,7 +45,8 @@ import torch
45
45
  import torch.nn as nn
46
46
 
47
47
  from nextrec.basic.features import DenseFeature, SequenceFeature, SparseFeature
48
- from nextrec.basic.layers import MLP, EmbeddingLayer, PredictionLayer
48
+ from nextrec.basic.layers import MLP, EmbeddingLayer
49
+ from nextrec.basic.heads import TaskHead
49
50
  from nextrec.basic.model import BaseModel
50
51
 
51
52
 
@@ -139,7 +140,7 @@ class ESMM(BaseModel):
139
140
  # CVR tower
140
141
  self.cvr_tower = MLP(input_dim=input_dim, output_layer=True, **cvr_params)
141
142
  self.grad_norm_shared_modules = ["embedding"]
142
- self.prediction_layer = PredictionLayer(
143
+ self.prediction_layer = TaskHead(
143
144
  task_type=self.default_task, task_dims=[1, 1]
144
145
  )
145
146
  # Register regularization weights
@@ -167,4 +168,4 @@ class ESMM(BaseModel):
167
168
 
168
169
  # Output: [CTR, CTCVR], We supervise CTR with click labels and CTCVR with conversion labels
169
170
  y = torch.cat([ctr, ctcvr], dim=1) # [B, 2]
170
- return y # [B, 2], where y[:, 0] is CTR and y[:, 1] is CTCVR
171
+ return y # [B, 2], where y[:, 0] is CTR and y[:, 1] is CTCVR
@@ -46,7 +46,8 @@ import torch
46
46
  import torch.nn as nn
47
47
 
48
48
  from nextrec.basic.features import DenseFeature, SequenceFeature, SparseFeature
49
- from nextrec.basic.layers import MLP, EmbeddingLayer, PredictionLayer
49
+ from nextrec.basic.layers import MLP, EmbeddingLayer
50
+ from nextrec.basic.heads import TaskHead
50
51
  from nextrec.basic.model import BaseModel
51
52
 
52
53
 
@@ -172,7 +173,7 @@ class MMOE(BaseModel):
172
173
  for tower_params in tower_params_list:
173
174
  tower = MLP(input_dim=expert_output_dim, output_layer=True, **tower_params)
174
175
  self.towers.append(tower)
175
- self.prediction_layer = PredictionLayer(
176
+ self.prediction_layer = TaskHead(
176
177
  task_type=self.default_task, task_dims=[1] * self.num_tasks
177
178
  )
178
179
  # Register regularization weights
@@ -219,4 +220,4 @@ class MMOE(BaseModel):
219
220
 
220
221
  # Stack outputs: [B, num_tasks]
221
222
  y = torch.cat(task_outputs, dim=1)
222
- return self.prediction_layer(y)
223
+ return self.prediction_layer(y)
@@ -49,7 +49,8 @@ import torch
49
49
  import torch.nn as nn
50
50
 
51
51
  from nextrec.basic.features import DenseFeature, SequenceFeature, SparseFeature
52
- from nextrec.basic.layers import MLP, EmbeddingLayer, PredictionLayer
52
+ from nextrec.basic.layers import MLP, EmbeddingLayer
53
+ from nextrec.basic.heads import TaskHead
53
54
  from nextrec.basic.model import BaseModel
54
55
  from nextrec.utils.model import get_mlp_output_dim
55
56
 
@@ -302,7 +303,7 @@ class PLE(BaseModel):
302
303
  for tower_params in tower_params_list:
303
304
  tower = MLP(input_dim=expert_output_dim, output_layer=True, **tower_params)
304
305
  self.towers.append(tower)
305
- self.prediction_layer = PredictionLayer(
306
+ self.prediction_layer = TaskHead(
306
307
  task_type=self.default_task, task_dims=[1] * self.num_tasks
307
308
  )
308
309
  # Register regularization weights
@@ -336,4 +337,4 @@ class PLE(BaseModel):
336
337
 
337
338
  # [B, num_tasks]
338
339
  y = torch.cat(task_outputs, dim=1)
339
- return self.prediction_layer(y)
340
+ return self.prediction_layer(y)
@@ -44,7 +44,8 @@ import torch.nn.functional as F
44
44
 
45
45
  from nextrec.basic.activation import activation_layer
46
46
  from nextrec.basic.features import DenseFeature, SequenceFeature, SparseFeature
47
- from nextrec.basic.layers import MLP, EmbeddingLayer, PredictionLayer
47
+ from nextrec.basic.layers import MLP, EmbeddingLayer
48
+ from nextrec.basic.heads import TaskHead
48
49
  from nextrec.basic.model import BaseModel
49
50
  from nextrec.utils.model import select_features
50
51
 
@@ -487,7 +488,7 @@ class POSO(BaseModel):
487
488
  self.grad_norm_shared_modules = ["embedding"]
488
489
  else:
489
490
  self.grad_norm_shared_modules = ["embedding", "mmoe"]
490
- self.prediction_layer = PredictionLayer(
491
+ self.prediction_layer = TaskHead(
491
492
  task_type=self.default_task,
492
493
  task_dims=[1] * self.num_tasks,
493
494
  )
@@ -524,4 +525,4 @@ class POSO(BaseModel):
524
525
  task_outputs.append(logit)
525
526
 
526
527
  y = torch.cat(task_outputs, dim=1)
527
- return self.prediction_layer(y)
528
+ return self.prediction_layer(y)
@@ -43,7 +43,8 @@ import torch
43
43
  import torch.nn as nn
44
44
 
45
45
  from nextrec.basic.features import DenseFeature, SequenceFeature, SparseFeature
46
- from nextrec.basic.layers import MLP, EmbeddingLayer, PredictionLayer
46
+ from nextrec.basic.layers import MLP, EmbeddingLayer
47
+ from nextrec.basic.heads import TaskHead
47
48
  from nextrec.basic.model import BaseModel
48
49
 
49
50
 
@@ -142,7 +143,7 @@ class ShareBottom(BaseModel):
142
143
  for tower_params in tower_params_list:
143
144
  tower = MLP(input_dim=bottom_output_dim, output_layer=True, **tower_params)
144
145
  self.towers.append(tower)
145
- self.prediction_layer = PredictionLayer(
146
+ self.prediction_layer = TaskHead(
146
147
  task_type=self.default_task, task_dims=[1] * self.num_tasks
147
148
  )
148
149
  # Register regularization weights
@@ -171,4 +172,4 @@ class ShareBottom(BaseModel):
171
172
 
172
173
  # Stack outputs: [B, num_tasks]
173
174
  y = torch.cat(task_outputs, dim=1)
174
- return self.prediction_layer(y)
175
+ return self.prediction_layer(y)
@@ -40,7 +40,8 @@ import torch
40
40
  import torch.nn as nn
41
41
 
42
42
  from nextrec.basic.features import DenseFeature, SequenceFeature, SparseFeature
43
- from nextrec.basic.layers import EmbeddingLayer, InputMask, PredictionLayer
43
+ from nextrec.basic.layers import EmbeddingLayer, InputMask
44
+ from nextrec.basic.heads import TaskHead
44
45
  from nextrec.basic.model import BaseModel
45
46
 
46
47
 
@@ -141,7 +142,7 @@ class AFM(BaseModel):
141
142
  self.attention_p = nn.Linear(attention_dim, 1, bias=False)
142
143
  self.attention_dropout = nn.Dropout(attention_dropout)
143
144
  self.output_projection = nn.Linear(self.embedding_dim, 1, bias=False)
144
- self.prediction_layer = PredictionLayer(task_type=self.default_task)
145
+ self.prediction_layer = TaskHead(task_type=self.default_task)
145
146
  self.input_mask = InputMask()
146
147
 
147
148
  # Register regularization weights
@@ -243,4 +244,4 @@ class AFM(BaseModel):
243
244
  y_afm = self.output_projection(weighted_sum)
244
245
 
245
246
  y = y_linear + y_afm
246
- return self.prediction_layer(y)
247
+ return self.prediction_layer(y)
@@ -58,7 +58,8 @@ import torch
58
58
  import torch.nn as nn
59
59
 
60
60
  from nextrec.basic.features import DenseFeature, SequenceFeature, SparseFeature
61
- from nextrec.basic.layers import EmbeddingLayer, MultiHeadSelfAttention, PredictionLayer
61
+ from nextrec.basic.layers import EmbeddingLayer, MultiHeadSelfAttention
62
+ from nextrec.basic.heads import TaskHead
62
63
  from nextrec.basic.model import BaseModel
63
64
 
64
65
 
@@ -162,7 +163,7 @@ class AutoInt(BaseModel):
162
163
 
163
164
  # Final prediction layer
164
165
  self.fc = nn.Linear(num_fields * att_embedding_dim, 1)
165
- self.prediction_layer = PredictionLayer(task_type=self.default_task)
166
+ self.prediction_layer = TaskHead(task_type=self.default_task)
166
167
 
167
168
  # Register regularization weights
168
169
  self.register_regularization_weights(
@@ -206,4 +207,4 @@ class AutoInt(BaseModel):
206
207
  start_dim=1
207
208
  ) # [B, num_fields * att_embedding_dim]
208
209
  y = self.fc(attention_output_flat) # [B, 1]
209
- return self.prediction_layer(y)
210
+ return self.prediction_layer(y)
@@ -54,7 +54,8 @@ import torch
54
54
  import torch.nn as nn
55
55
 
56
56
  from nextrec.basic.features import DenseFeature, SequenceFeature, SparseFeature
57
- from nextrec.basic.layers import MLP, EmbeddingLayer, PredictionLayer
57
+ from nextrec.basic.layers import MLP, EmbeddingLayer
58
+ from nextrec.basic.heads import TaskHead
58
59
  from nextrec.basic.model import BaseModel
59
60
 
60
61
 
@@ -163,7 +164,7 @@ class DCN(BaseModel):
163
164
  # Final layer only uses cross network output
164
165
  self.final_layer = nn.Linear(input_dim, 1)
165
166
 
166
- self.prediction_layer = PredictionLayer(task_type=self.task)
167
+ self.prediction_layer = TaskHead(task_type=self.task)
167
168
 
168
169
  # Register regularization weights
169
170
  self.register_regularization_weights(
@@ -197,4 +198,4 @@ class DCN(BaseModel):
197
198
 
198
199
  # Final prediction
199
200
  y = self.final_layer(combined)
200
- return self.prediction_layer(y)
201
+ return self.prediction_layer(y)
@@ -47,7 +47,8 @@ import torch
47
47
  import torch.nn as nn
48
48
 
49
49
  from nextrec.basic.features import DenseFeature, SequenceFeature, SparseFeature
50
- from nextrec.basic.layers import MLP, EmbeddingLayer, PredictionLayer
50
+ from nextrec.basic.layers import MLP, EmbeddingLayer
51
+ from nextrec.basic.heads import TaskHead
51
52
  from nextrec.basic.model import BaseModel
52
53
 
53
54
 
@@ -272,7 +273,7 @@ class DCNv2(BaseModel):
272
273
  final_input_dim = input_dim
273
274
 
274
275
  self.final_layer = nn.Linear(final_input_dim, 1)
275
- self.prediction_layer = PredictionLayer(task_type=self.default_task)
276
+ self.prediction_layer = TaskHead(task_type=self.default_task)
276
277
 
277
278
  self.register_regularization_weights(
278
279
  embedding_attr="embedding",
@@ -301,4 +302,4 @@ class DCNv2(BaseModel):
301
302
  combined = cross_out
302
303
 
303
304
  logit = self.final_layer(combined)
304
- return self.prediction_layer(logit)
305
+ return self.prediction_layer(logit)
@@ -45,7 +45,8 @@ embedding,无需手工构造交叉特征即可端到端训练,常用于 CTR/
45
45
  import torch.nn as nn
46
46
 
47
47
  from nextrec.basic.features import DenseFeature, SequenceFeature, SparseFeature
48
- from nextrec.basic.layers import FM, LR, MLP, EmbeddingLayer, PredictionLayer
48
+ from nextrec.basic.layers import FM, LR, MLP, EmbeddingLayer
49
+ from nextrec.basic.heads import TaskHead
49
50
  from nextrec.basic.model import BaseModel
50
51
 
51
52
 
@@ -111,7 +112,7 @@ class DeepFM(BaseModel):
111
112
  self.linear = LR(fm_emb_dim_total)
112
113
  self.fm = FM(reduce_sum=True)
113
114
  self.mlp = MLP(input_dim=mlp_input_dim, **mlp_params)
114
- self.prediction_layer = PredictionLayer(task_type=self.default_task)
115
+ self.prediction_layer = TaskHead(task_type=self.default_task)
115
116
 
116
117
  # Register regularization weights
117
118
  self.register_regularization_weights(
@@ -133,4 +134,4 @@ class DeepFM(BaseModel):
133
134
  y_deep = self.mlp(input_deep) # [B, 1]
134
135
 
135
136
  y = y_linear + y_fm + y_deep
136
- return self.prediction_layer(y)
137
+ return self.prediction_layer(y)
@@ -55,8 +55,8 @@ from nextrec.basic.layers import (
55
55
  MLP,
56
56
  AttentionPoolingLayer,
57
57
  EmbeddingLayer,
58
- PredictionLayer,
59
58
  )
59
+ from nextrec.basic.heads import TaskHead
60
60
  from nextrec.basic.model import BaseModel
61
61
 
62
62
 
@@ -346,7 +346,7 @@ class DIEN(BaseModel):
346
346
  )
347
347
 
348
348
  self.mlp = MLP(input_dim=mlp_input_dim, **mlp_params)
349
- self.prediction_layer = PredictionLayer(task_type=self.task)
349
+ self.prediction_layer = TaskHead(task_type=self.task)
350
350
 
351
351
  self.register_regularization_weights(
352
352
  embedding_attr="embedding",
@@ -55,8 +55,8 @@ from nextrec.basic.layers import (
55
55
  MLP,
56
56
  AttentionPoolingLayer,
57
57
  EmbeddingLayer,
58
- PredictionLayer,
59
58
  )
59
+ from nextrec.basic.heads import TaskHead
60
60
  from nextrec.basic.model import BaseModel
61
61
 
62
62
 
@@ -173,7 +173,7 @@ class DIN(BaseModel):
173
173
 
174
174
  # MLP for final prediction
175
175
  self.mlp = MLP(input_dim=mlp_input_dim, **mlp_params)
176
- self.prediction_layer = PredictionLayer(task_type=self.task)
176
+ self.prediction_layer = TaskHead(task_type=self.task)
177
177
 
178
178
  # Register regularization weights
179
179
  self.register_regularization_weights(
@@ -38,7 +38,8 @@ import torch.nn as nn
38
38
  import torch.nn.functional as F
39
39
 
40
40
  from nextrec.basic.features import DenseFeature, SequenceFeature, SparseFeature
41
- from nextrec.basic.layers import LR, EmbeddingLayer, PredictionLayer
41
+ from nextrec.basic.layers import LR, EmbeddingLayer
42
+ from nextrec.basic.heads import TaskHead
42
43
  from nextrec.basic.model import BaseModel
43
44
 
44
45
 
@@ -295,7 +296,7 @@ class EulerNet(BaseModel):
295
296
  else:
296
297
  self.linear = None
297
298
 
298
- self.prediction_layer = PredictionLayer(task_type=self.task)
299
+ self.prediction_layer = TaskHead(task_type=self.task)
299
300
 
300
301
  modules = ["mapping", "layers", "w", "w_im"]
301
302
  if self.use_linear:
@@ -331,4 +332,4 @@ class EulerNet(BaseModel):
331
332
  r, p = layer(r, p)
332
333
  r_flat = r.reshape(r.size(0), self.num_orders * self.embedding_dim)
333
334
  p_flat = p.reshape(p.size(0), self.num_orders * self.embedding_dim)
334
- return self.w(r_flat) + self.w_im(p_flat)
335
+ return self.w(r_flat) + self.w_im(p_flat)
@@ -43,7 +43,8 @@ import torch
43
43
  import torch.nn as nn
44
44
 
45
45
  from nextrec.basic.features import DenseFeature, SequenceFeature, SparseFeature
46
- from nextrec.basic.layers import AveragePooling, InputMask, PredictionLayer, SumPooling
46
+ from nextrec.basic.layers import AveragePooling, InputMask, SumPooling
47
+ from nextrec.basic.heads import TaskHead
47
48
  from nextrec.basic.model import BaseModel
48
49
  from nextrec.utils.torch_utils import get_initializer
49
50
 
@@ -140,7 +141,7 @@ class FFM(BaseModel):
140
141
  nn.Linear(dense_input_dim, 1, bias=True) if dense_input_dim > 0 else None
141
142
  )
142
143
 
143
- self.prediction_layer = PredictionLayer(task_type=self.task)
144
+ self.prediction_layer = TaskHead(task_type=self.task)
144
145
  self.input_mask = InputMask()
145
146
  self.mean_pool = AveragePooling()
146
147
  self.sum_pool = SumPooling()
@@ -272,4 +273,4 @@ class FFM(BaseModel):
272
273
  )
273
274
 
274
275
  y = y_linear + y_interaction
275
- return self.prediction_layer(y)
276
+ return self.prediction_layer(y)
@@ -50,9 +50,9 @@ from nextrec.basic.layers import (
50
50
  BiLinearInteractionLayer,
51
51
  EmbeddingLayer,
52
52
  HadamardInteractionLayer,
53
- PredictionLayer,
54
53
  SENETLayer,
55
54
  )
55
+ from nextrec.basic.heads import TaskHead
56
56
  from nextrec.basic.model import BaseModel
57
57
 
58
58
 
@@ -168,7 +168,7 @@ class FiBiNET(BaseModel):
168
168
  num_pairs = self.num_fields * (self.num_fields - 1) // 2
169
169
  interaction_dim = num_pairs * self.embedding_dim * 2
170
170
  self.mlp = MLP(input_dim=interaction_dim, **mlp_params)
171
- self.prediction_layer = PredictionLayer(task_type=self.default_task)
171
+ self.prediction_layer = TaskHead(task_type=self.default_task)
172
172
 
173
173
  # Register regularization weights
174
174
  self.register_regularization_weights(
@@ -42,7 +42,8 @@ import torch.nn as nn
42
42
 
43
43
  from nextrec.basic.features import DenseFeature, SequenceFeature, SparseFeature
44
44
  from nextrec.basic.layers import FM as FMInteraction
45
- from nextrec.basic.layers import LR, EmbeddingLayer, PredictionLayer
45
+ from nextrec.basic.heads import TaskHead
46
+ from nextrec.basic.layers import LR, EmbeddingLayer
46
47
  from nextrec.basic.model import BaseModel
47
48
 
48
49
 
@@ -105,7 +106,7 @@ class FM(BaseModel):
105
106
  fm_input_dim = sum([f.embedding_dim for f in self.fm_features])
106
107
  self.linear = LR(fm_input_dim)
107
108
  self.fm = FMInteraction(reduce_sum=True)
108
- self.prediction_layer = PredictionLayer(task_type=self.task)
109
+ self.prediction_layer = TaskHead(task_type=self.task)
109
110
 
110
111
  # Register regularization weights
111
112
  self.register_regularization_weights(
@@ -124,4 +125,4 @@ class FM(BaseModel):
124
125
  y_linear = self.linear(input_fm.flatten(start_dim=1))
125
126
  y_fm = self.fm(input_fm)
126
127
  y = y_linear + y_fm
127
- return self.prediction_layer(y)
128
+ return self.prediction_layer(y)
@@ -41,7 +41,8 @@ LR 是 CTR/排序任务中最经典的线性基线模型。它将稠密、稀疏
41
41
  import torch.nn as nn
42
42
 
43
43
  from nextrec.basic.features import DenseFeature, SequenceFeature, SparseFeature
44
- from nextrec.basic.layers import EmbeddingLayer, LR as LinearLayer, PredictionLayer
44
+ from nextrec.basic.layers import EmbeddingLayer, LR as LinearLayer
45
+ from nextrec.basic.heads import TaskHead
45
46
  from nextrec.basic.model import BaseModel
46
47
 
47
48
 
@@ -99,7 +100,7 @@ class LR(BaseModel):
99
100
  self.embedding = EmbeddingLayer(features=self.all_features)
100
101
  linear_input_dim = self.embedding.input_dim
101
102
  self.linear = LinearLayer(linear_input_dim)
102
- self.prediction_layer = PredictionLayer(task_type=self.task)
103
+ self.prediction_layer = TaskHead(task_type=self.task)
103
104
 
104
105
  self.register_regularization_weights(
105
106
  embedding_attr="embedding", include_modules=["linear"]
@@ -115,4 +116,4 @@ class LR(BaseModel):
115
116
  def forward(self, x):
116
117
  input_linear = self.embedding(x=x, features=self.all_features, squeeze_dim=True)
117
118
  y = self.linear(input_linear)
118
- return self.prediction_layer(y)
119
+ return self.prediction_layer(y)
@@ -58,7 +58,8 @@ import torch.nn as nn
58
58
  import torch.nn.functional as F
59
59
 
60
60
  from nextrec.basic.features import DenseFeature, SequenceFeature, SparseFeature
61
- from nextrec.basic.layers import MLP, EmbeddingLayer, PredictionLayer
61
+ from nextrec.basic.layers import MLP, EmbeddingLayer
62
+ from nextrec.basic.heads import TaskHead
62
63
  from nextrec.basic.model import BaseModel
63
64
 
64
65
 
@@ -282,7 +283,7 @@ class MaskNet(BaseModel):
282
283
  input_dim=self.num_blocks * block_hidden_dim, **mlp_params
283
284
  )
284
285
  self.output_layer = None
285
- self.prediction_layer = PredictionLayer(task_type=self.task)
286
+ self.prediction_layer = TaskHead(task_type=self.task)
286
287
 
287
288
  if self.architecture == "serial":
288
289
  self.register_regularization_weights(
@@ -323,4 +324,4 @@ class MaskNet(BaseModel):
323
324
  hidden = self.block_dropout(hidden)
324
325
  logit = self.output_layer(hidden) # [B, 1]
325
326
  y = self.prediction_layer(logit)
326
- return y
327
+ return y
@@ -38,7 +38,8 @@ import torch
38
38
  import torch.nn as nn
39
39
 
40
40
  from nextrec.basic.features import DenseFeature, SequenceFeature, SparseFeature
41
- from nextrec.basic.layers import MLP, EmbeddingLayer, PredictionLayer
41
+ from nextrec.basic.layers import MLP, EmbeddingLayer
42
+ from nextrec.basic.heads import TaskHead
42
43
  from nextrec.basic.model import BaseModel
43
44
 
44
45
 
@@ -136,7 +137,7 @@ class PNN(BaseModel):
136
137
  product_dim = 2 * self.num_pairs
137
138
 
138
139
  self.mlp = MLP(input_dim=linear_dim + product_dim, **mlp_params)
139
- self.prediction_layer = PredictionLayer(task_type=self.task)
140
+ self.prediction_layer = TaskHead(task_type=self.task)
140
141
 
141
142
  modules = ["mlp"]
142
143
  if self.kernel is not None:
@@ -197,4 +198,4 @@ class PNN(BaseModel):
197
198
 
198
199
  deep_input = torch.cat([linear_signal, product_signal], dim=1)
199
200
  y = self.mlp(deep_input)
200
- return self.prediction_layer(y)
201
+ return self.prediction_layer(y)
@@ -42,7 +42,8 @@ Wide & Deep 同时使用宽线性部分(记忆共现/手工交叉)与深网
42
42
  import torch.nn as nn
43
43
 
44
44
  from nextrec.basic.features import DenseFeature, SequenceFeature, SparseFeature
45
- from nextrec.basic.layers import LR, MLP, EmbeddingLayer, PredictionLayer
45
+ from nextrec.basic.layers import LR, MLP, EmbeddingLayer
46
+ from nextrec.basic.heads import TaskHead
46
47
  from nextrec.basic.model import BaseModel
47
48
 
48
49
 
@@ -114,7 +115,7 @@ class WideDeep(BaseModel):
114
115
  # deep_emb_dim_total = sum([f.embedding_dim for f in self.deep_features if not isinstance(f, DenseFeature)])
115
116
  # dense_input_dim = sum([getattr(f, "embedding_dim", 1) or 1 for f in dense_features])
116
117
  self.mlp = MLP(input_dim=input_dim, **mlp_params)
117
- self.prediction_layer = PredictionLayer(task_type=self.task)
118
+ self.prediction_layer = TaskHead(task_type=self.task)
118
119
  # Register regularization weights
119
120
  self.register_regularization_weights(
120
121
  embedding_attr="embedding", include_modules=["linear", "mlp"]
@@ -137,4 +138,4 @@ class WideDeep(BaseModel):
137
138
 
138
139
  # Combine wide and deep
139
140
  y = y_wide + y_deep
140
- return self.prediction_layer(y)
141
+ return self.prediction_layer(y)
@@ -56,7 +56,8 @@ import torch.nn as nn
56
56
  import torch.nn.functional as F
57
57
 
58
58
  from nextrec.basic.features import DenseFeature, SequenceFeature, SparseFeature
59
- from nextrec.basic.layers import LR, MLP, EmbeddingLayer, PredictionLayer
59
+ from nextrec.basic.layers import LR, MLP, EmbeddingLayer
60
+ from nextrec.basic.heads import TaskHead
60
61
  from nextrec.basic.model import BaseModel
61
62
 
62
63
 
@@ -186,7 +187,7 @@ class xDeepFM(BaseModel):
186
187
  [getattr(f, "embedding_dim", 1) or 1 for f in dense_features]
187
188
  )
188
189
  self.mlp = MLP(input_dim=deep_emb_dim_total + dense_input_dim, **mlp_params)
189
- self.prediction_layer = PredictionLayer(task_type=self.task)
190
+ self.prediction_layer = TaskHead(task_type=self.task)
190
191
 
191
192
  # Register regularization weights
192
193
  self.register_regularization_weights(
@@ -218,4 +219,4 @@ class xDeepFM(BaseModel):
218
219
 
219
220
  # Combine all parts
220
221
  y = y_linear + y_cin + y_deep
221
- return self.prediction_layer(y)
222
+ return self.prediction_layer(y)
nextrec/utils/config.py CHANGED
@@ -28,7 +28,9 @@ if TYPE_CHECKING:
28
28
  from nextrec.data.preprocessor import DataProcessor
29
29
 
30
30
 
31
- def resolve_path(path_str: str | Path | None = None, base_dir: Path | None = None) -> Path:
31
+ def resolve_path(
32
+ path_str: str | Path | None = None, base_dir: Path | None = None
33
+ ) -> Path:
32
34
  if path_str is None:
33
35
  return Path.cwd()
34
36
  path = Path(path_str).expanduser()
@@ -36,9 +38,16 @@ def resolve_path(path_str: str | Path | None = None, base_dir: Path | None = Non
36
38
  return path
37
39
  # Prefer resolving relative to current working directory when the path (or its parent)
38
40
  # already exists there; otherwise fall back to the config file's directory.
39
- candidates = ((Path.cwd() / path).resolve(), ((base_dir or Path.cwd()) / path).resolve())
41
+ candidates = (
42
+ (Path.cwd() / path).resolve(),
43
+ ((base_dir or Path.cwd()) / path).resolve(),
44
+ )
40
45
  return next(
41
- (candidate for candidate in candidates if candidate.exists() or candidate.parent.exists()),
46
+ (
47
+ candidate
48
+ for candidate in candidates
49
+ if candidate.exists() or candidate.parent.exists()
50
+ ),
42
51
  candidates[0],
43
52
  )
44
53
 
nextrec/utils/console.py CHANGED
@@ -16,8 +16,9 @@ import numbers
16
16
  import os
17
17
  import platform
18
18
  import sys
19
+ import time
19
20
  from datetime import datetime, timedelta
20
- from typing import Any, Callable, Iterable, Mapping, TypeVar
21
+ from typing import Any, Callable, Mapping, TypeVar
21
22
 
22
23
  import numpy as np
23
24
  from rich import box
@@ -128,45 +129,99 @@ class BlackMofNCompleteColumn(MofNCompleteColumn):
128
129
  )
129
130
 
130
131
 
131
- def progress(
132
- iterable: Iterable[T],
133
- *,
134
- description: str | None = None,
135
- total: int | None = None,
136
- disable: bool = False,
137
- ) -> Iterable[T]:
132
+ def progress(iterable, *, description=None, total=None, disable=False):
138
133
  if disable:
139
- for item in iterable:
140
- yield item
134
+ yield from iterable
141
135
  return
136
+
142
137
  resolved_total = total
143
138
  if resolved_total is None:
144
139
  try:
145
- resolved_total = len(iterable) # type: ignore[arg-type]
140
+ resolved_total = len(iterable)
146
141
  except TypeError:
147
142
  resolved_total = None
148
143
 
144
+ stream = sys.stderr
145
+
146
+ if not stream.isatty():
147
+ start_time = time.monotonic()
148
+ last_tick = start_time
149
+ min_interval_seconds = 10.0
150
+ max_interval_seconds = 300.0
151
+ target_steps = (
152
+ max(1, resolved_total // 20) if resolved_total is not None else 500
153
+ )
154
+ interval_seconds = min_interval_seconds
155
+ completed = 0
156
+
157
+ def emit(now: float):
158
+ elapsed = max(0.0, now - start_time)
159
+ speed = completed / elapsed if elapsed > 0 else 0.0
160
+ if resolved_total is not None and speed > 0:
161
+ remaining = max(0.0, resolved_total - completed)
162
+ eta_seconds = remaining / speed
163
+ eta_text = str(timedelta(seconds=int(eta_seconds)))
164
+ else:
165
+ eta_text = "--:--:--"
166
+ total_text = str(resolved_total) if resolved_total is not None else "?"
167
+ stream.write(
168
+ f"{description or 'Working'}: {completed}/{total_text} "
169
+ f"elapsed={timedelta(seconds=int(elapsed))} "
170
+ f"speed={speed:.2f}/s ETA={eta_text}\n"
171
+ )
172
+ stream.flush()
173
+ return speed
174
+
175
+ for item in iterable:
176
+ yield item
177
+ completed += 1
178
+ now = time.monotonic()
179
+ if now - last_tick >= interval_seconds:
180
+ speed = emit(now)
181
+ last_tick = now
182
+ if speed > 0:
183
+ interval_seconds = min(
184
+ max_interval_seconds,
185
+ max(min_interval_seconds, target_steps / speed),
186
+ )
187
+ end_now = time.monotonic()
188
+ if end_now - last_tick >= 1e-6:
189
+ emit(end_now)
190
+ return
191
+
192
+ # TTY: rich
193
+ console = Console(file=stream, force_terminal=True)
149
194
  progress_bar = Progress(
150
- SpinnerColumn(style="black"),
151
- TextColumn("{task.description}", style="black"),
152
- BarColumn(
153
- bar_width=36, style="black", complete_style="black", finished_style="black"
154
- ),
155
- TaskProgressColumn(style="black"),
156
- BlackMofNCompleteColumn(),
157
- BlackTimeElapsedColumn(),
158
- BlackTimeRemainingColumn(),
195
+ SpinnerColumn(),
196
+ TextColumn("{task.description}"),
197
+ BarColumn(bar_width=36),
198
+ TaskProgressColumn(),
199
+ MofNCompleteColumn(),
200
+ TimeElapsedColumn(),
201
+ TimeRemainingColumn(),
159
202
  refresh_per_second=12,
203
+ console=console,
160
204
  )
161
205
 
162
- task_id = progress_bar.add_task(description or "Working", total=resolved_total)
163
- progress_bar.start()
164
- try:
165
- for item in iterable:
166
- yield item
167
- progress_bar.advance(task_id, 1)
168
- finally:
169
- progress_bar.stop()
206
+ if hasattr(progress_bar, "__enter__"):
207
+ with progress_bar:
208
+ task_id = progress_bar.add_task(
209
+ description or "Working", total=resolved_total
210
+ )
211
+ for item in iterable:
212
+ yield item
213
+ progress_bar.advance(task_id, 1)
214
+ else:
215
+ progress_bar.start()
216
+ try:
217
+ task_id = progress_bar.add_task(
218
+ description or "Working", total=resolved_total
219
+ )
220
+ for item in iterable:
221
+ yield item
222
+ progress_bar.advance(task_id, 1)
223
+ finally:
224
+ progress_bar.stop()
170
225
 
171
226
 
172
227
  def group_metrics_by_task(
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: nextrec
3
- Version: 0.4.15
3
+ Version: 0.4.17
4
4
  Summary: A comprehensive recommendation library with match, ranking, and multi-task learning models
5
5
  Project-URL: Homepage, https://github.com/zerolovesea/NextRec
6
6
  Project-URL: Repository, https://github.com/zerolovesea/NextRec
@@ -68,7 +68,7 @@ Description-Content-Type: text/markdown
68
68
  ![PyTorch](https://img.shields.io/badge/PyTorch-1.10+-ee4c2c.svg)
69
69
 
70
70
  ![License](https://img.shields.io/badge/License-Apache%202.0-green.svg)
71
- ![Version](https://img.shields.io/badge/Version-0.4.15-orange.svg)
71
+ ![Version](https://img.shields.io/badge/Version-0.4.17-orange.svg)
72
72
 
73
73
 
74
74
  中文文档 | [English Version](README_en.md)
@@ -102,7 +102,7 @@ NextRec是一个基于PyTorch的现代推荐系统框架,旨在为研究工程
102
102
 
103
103
  ## NextRec近期进展
104
104
 
105
- - **21/12/2025** 在v0.4.15中加入了对[GradNorm](/nextrec/loss/grad_norm.py)的支持,通过compile的`loss_weight='grad_norm'`进行配置
105
+ - **21/12/2025** 在v0.4.16中加入了对[GradNorm](/nextrec/loss/grad_norm.py)的支持,通过compile的`loss_weight='grad_norm'`进行配置
106
106
  - **12/12/2025** 在v0.4.9中加入了[RQ-VAE](/nextrec/models/representation/rqvae.py)模块。配套的[数据集](/dataset/ecommerce_task.csv)和[代码](tutorials/notebooks/zh/使用RQ-VAE构建语义ID.ipynb)已经同步在仓库中
107
107
  - **07/12/2025** 发布了NextRec CLI命令行工具,它允许用户根据配置文件进行一键训练和推理,我们提供了相关的[教程](/nextrec_cli_preset/NextRec-CLI_zh.md)和[教学代码](/nextrec_cli_preset)
108
108
  - **03/12/2025** NextRec获得了100颗🌟!感谢大家的支持
@@ -244,11 +244,11 @@ nextrec --mode=train --train_config=path/to/train_config.yaml
244
244
  nextrec --mode=predict --predict_config=path/to/predict_config.yaml
245
245
  ```
246
246
 
247
- > 截止当前版本0.4.15,NextRec CLI支持单机训练,分布式训练相关功能尚在开发中。
247
+ > 截止当前版本0.4.17,NextRec CLI支持单机训练,分布式训练相关功能尚在开发中。
248
248
 
249
249
  ## 兼容平台
250
250
 
251
- 当前最新版本为0.4.15,所有模型和测试代码均已在以下平台通过验证,如果开发者在使用中遇到兼容问题,请在issue区提出错误报告及系统版本:
251
+ 当前最新版本为0.4.17,所有模型和测试代码均已在以下平台通过验证,如果开发者在使用中遇到兼容问题,请在issue区提出错误报告及系统版本:
252
252
 
253
253
  | 平台 | 配置 |
254
254
  |------|------|
@@ -1,14 +1,15 @@
1
1
  nextrec/__init__.py,sha256=_M3oUqyuvQ5k8Th_3wId6hQ_caclh7M5ad51XN09m98,235
2
- nextrec/__version__.py,sha256=yHOqz5A9VYGVIjRAkE5ZWR9IpLeDo8sygF-I11UMLv0,23
2
+ nextrec/__version__.py,sha256=rJbmNVFVceaHhRo-pizixUQW0k5sqoDVdU1TfXfG8CA,23
3
3
  nextrec/cli.py,sha256=JUprwpoVbT4tXsGgMpj9Y_5yYByQXYMliMdWd38ReKo,24441
4
4
  nextrec/basic/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
5
5
  nextrec/basic/activation.py,sha256=uzTWfCOtBSkbu_Gk9XBNTj8__s241CaYLJk6l8nGX9I,2885
6
6
  nextrec/basic/callback.py,sha256=nn1f8FG9c52vJ-gvwteqPbk3-1QuNS1vmhBlkENdb0I,14636
7
7
  nextrec/basic/features.py,sha256=GyCUzGPuizUofrZSSOdqHK84YhnX4MGTdu7Cx2OGhUA,4654
8
+ nextrec/basic/heads.py,sha256=0TqttvqkaUFkGg2bkMbFhOK63AuHs8sZVzvtbgpo1CE,3318
8
9
  nextrec/basic/layers.py,sha256=ZM3Nka3e2cit3e3peL0ukJCMgKZK1ovNFfAWvVOwlos,28556
9
10
  nextrec/basic/loggers.py,sha256=Zh1A5DVAFqlGglyaQ4_IMgvFbWAcXX5H3aHbCWA82nE,6524
10
- nextrec/basic/metrics.py,sha256=saNgM7kuHk9xqDxZF6x33irTaxeXCU-hxYTUQauuGgg,23074
11
- nextrec/basic/model.py,sha256=WGdGBK0oI8amRIor48xqIMij9UwrSkOMs0LhFczalzY,103710
11
+ nextrec/basic/metrics.py,sha256=1r6efTc9TpARNBt5X9ISoppTZflej6EdFkjPYHV-YZI,23162
12
+ nextrec/basic/model.py,sha256=MFJr6_FAT8-niEwZrqmo8oWzkjuspdjXRsizt1R056Q,103814
12
13
  nextrec/basic/session.py,sha256=UOG_-EgCOxvqZwCkiEd8sgNV2G1sm_HbzKYVQw8yYDI,4483
13
14
  nextrec/data/__init__.py,sha256=YZQjpty1pDCM7q_YNmiA2sa5kbujUw26ObLHWjMPjKY,1194
14
15
  nextrec/data/batch_utils.py,sha256=0bYGVX7RlhnHv_ZBaUngjDIpBNw-igCk98DgOsF7T6o,2879
@@ -25,28 +26,28 @@ nextrec/loss/pointwise.py,sha256=o9J3OznY0hlbDsUXqn3k-BBzYiuUH5dopz8QBFqS_kQ,734
25
26
  nextrec/models/generative/__init__.py,sha256=0MV3P-_ainPaTxmRBGWKUVCEt14KJvuvEHmRB3OQ1Fs,176
26
27
  nextrec/models/generative/tiger.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
27
28
  nextrec/models/multi_task/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
28
- nextrec/models/multi_task/esmm.py,sha256=AqesBZ4tOFNm7POCrHZ90h1zWWSViZAYfydUVOh2dEU,6545
29
- nextrec/models/multi_task/mmoe.py,sha256=aaQKcx4PL_mAanW3tkjAR886KmMCHTdBuu4p9EIKQJo,8657
30
- nextrec/models/multi_task/ple.py,sha256=fqkujPFGxxQOO_6nBZEz_UcxLEUoX_vCJsk0YOpxTg4,13084
31
- nextrec/models/multi_task/poso.py,sha256=J_Btxhm9JpFJMdQQHNNf9mMRHOgO7j1ts6VN5o4qJnk,19193
32
- nextrec/models/multi_task/share_bottom.py,sha256=DTWm6fpLCLiXimD-qk_0YIKT_9THMFDrnx4GDViXc_g,6583
29
+ nextrec/models/multi_task/esmm.py,sha256=zVHuDwVP0HNx1tgjJOc9iz_uFXfUwoy5pVPn-XX0zBs,6561
30
+ nextrec/models/multi_task/mmoe.py,sha256=lXK_kSw9TFTY6437yDA90_jerZi02lKdVM8zTCH3QMU,8673
31
+ nextrec/models/multi_task/ple.py,sha256=9X0eSMIFGj4Dj9yy2tpKEI7vp9TL5XvgKehMLm4wpJs,13100
32
+ nextrec/models/multi_task/poso.py,sha256=BomMkRx-tNtFiZ0mFBrRYms6PnffMvRlPkO9c5QZaiQ,19209
33
+ nextrec/models/multi_task/share_bottom.py,sha256=i1vt3z6Ycj4SjucHrzmJeuIfo2_VDwSF9ubJeNzKy6c,6599
33
34
  nextrec/models/ranking/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
34
- nextrec/models/ranking/afm.py,sha256=96jGUPL4yTWobMIVBjHpOxl9AtAzCAGR8yw7Sy2JmdQ,10125
35
- nextrec/models/ranking/autoint.py,sha256=S6Cxnp1q2OErSYqmIix5P-b4qLWR-0dY6TMStuU6WLg,8109
36
- nextrec/models/ranking/dcn.py,sha256=whkjiKEuadl6oSP-NJdSOCOqvWZGX4EsId9oqlfVpa8,7299
37
- nextrec/models/ranking/dcn_v2.py,sha256=QnqQbJsrtQp4mtvnBXFUVefKyr4dw-gHNWrCbO26oHw,11163
38
- nextrec/models/ranking/deepfm.py,sha256=aXoK59e2KaaPe5vfyFW4YiHbX4E2iG3gxFCxmWo8RHk,5200
39
- nextrec/models/ranking/dien.py,sha256=c7Zs85vxhOgKHg5s0QcSLCn1xXCCSD177TMERgM_v8g,18958
40
- nextrec/models/ranking/din.py,sha256=gdUhuKiKXBNOALbK8fGhlbSeuDT8agcEdNSrC_wveHc,9422
41
- nextrec/models/ranking/eulernet.py,sha256=SQr7M_6GI1u09jpxzRasQLFAPLXcmqff69waER6fiD8,12201
42
- nextrec/models/ranking/ffm.py,sha256=9t685SViSU1J0ESz-lrYSXhf4d4BWLNYZXReeVEk3e8,11262
43
- nextrec/models/ranking/fibinet.py,sha256=_eroddVHooJcaGT8MqS4mUrtv5j4pnTmfI3FoAKOZhs,7919
44
- nextrec/models/ranking/fm.py,sha256=SsrSKK3y4xg5Lv-t3JLnZan55Hzze2AxAiVPuscy0bk,4536
45
- nextrec/models/ranking/lr.py,sha256=MUonlKyA77_bfshTupFjOhY5tiuSJxApFM-_yOk4Nwk,4008
46
- nextrec/models/ranking/masknet.py,sha256=tY1y2lO0iq82oylPN0SBnL5Bikc8weinFXpURyVT1hE,12373
47
- nextrec/models/ranking/pnn.py,sha256=FcNIFAw5J0ORGSR6L8ZK7NeXlJPpojwe_SpsxMQqCFw,8174
48
- nextrec/models/ranking/widedeep.py,sha256=-ghKfe_0puvlI9fBQr8lK3gXkfVvslGwP40AJTGqc7w,5077
49
- nextrec/models/ranking/xdeepfm.py,sha256=FMtl_zYO1Ty_2d9VWRsz6Jo-Xjw8vikpIQPZCDVavVY,8156
35
+ nextrec/models/ranking/afm.py,sha256=p3rNc957TaEc2tkhpbQyTup8a1oH34UJ2YKLqCuUD-4,10141
36
+ nextrec/models/ranking/autoint.py,sha256=OgNh-NpC9ragHbIJfMCnQJRS6mh0wINkw961rGp32_Q,8125
37
+ nextrec/models/ranking/dcn.py,sha256=c22ZBbBw3aEAISgtnsKZqhCvcYhM68AmEPZFS6MYO44,7315
38
+ nextrec/models/ranking/dcn_v2.py,sha256=PqhkSPhGUx52hZMd2JPBqYQr1o5twCg6M6hCaaTl3aY,11179
39
+ nextrec/models/ranking/deepfm.py,sha256=9K_TEptWwIr_SNC4yqN8PA2SzUKafZL94PVT2qH5Bag,5216
40
+ nextrec/models/ranking/dien.py,sha256=URKWeOPeRD5OIWNsAxgVvbetOSrBHoq2eO5rR5UJ0jU,18971
41
+ nextrec/models/ranking/din.py,sha256=Y8v0gONRt1OZORmn0hqMuzMfkvX0Nz1gByJ94jo3MUw,9435
42
+ nextrec/models/ranking/eulernet.py,sha256=Mz2TObgYmYR7jIzwkafaySIxSn7P0ZVVFJTHZ0QEPFs,12217
43
+ nextrec/models/ranking/ffm.py,sha256=0NB-ygR486lAJgarhOsdbaE8E0rbc1o8h5d8spWs5io,11278
44
+ nextrec/models/ranking/fibinet.py,sha256=VP0gNoQwoLKxniv2HmHzxlnR3YlrnQJt6--CwmAgsW4,7932
45
+ nextrec/models/ranking/fm.py,sha256=wX0VBvme41mFxSaT4Y1w4JfROL8EYT5KEe-hwRNr3zc,4552
46
+ nextrec/models/ranking/lr.py,sha256=hc7XKa1GJhRa2CJY8-sYR0AB_fvmVLYCFoDRNCg04rU,4024
47
+ nextrec/models/ranking/masknet.py,sha256=NNtIyAd-UHT78TvLcGbMvzR_aJRPzG3HBri1adNBrc4,12389
48
+ nextrec/models/ranking/pnn.py,sha256=6a78M6n6B9aMBSvm0qICyBKjccoZ9TWVkDo9enc5d08,8190
49
+ nextrec/models/ranking/widedeep.py,sha256=LNhLBmFP7M_z6ldxp99uMMK9s07EkP_9pjCAl6Hqp_Q,5093
50
+ nextrec/models/ranking/xdeepfm.py,sha256=q4ITrgFfLe7NABxJQuchs8yqW_-jKwgXIwmpB2Lvoio,8172
50
51
  nextrec/models/representation/__init__.py,sha256=O3QHMMXBszwM-mTl7bA3wawNZvDGet-QIv6Ys5GHGJ8,190
51
52
  nextrec/models/representation/autorec.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
52
53
  nextrec/models/representation/bpr.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -64,15 +65,15 @@ nextrec/models/retrieval/youtube_dnn.py,sha256=xtGPV6_5LeSZBKkrTaU1CmtxlhgYLvZmj
64
65
  nextrec/models/sequential/hstu.py,sha256=P2Kl7HEL3afwiCApGKQ6UbUNO9eNXXrB10H7iiF8cI0,19735
65
66
  nextrec/models/sequential/sasrec.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
66
67
  nextrec/utils/__init__.py,sha256=C-1l-suSsN_MlPlj_5LApyCRQLOao5l7bO0SccwKHw4,2598
67
- nextrec/utils/config.py,sha256=2zcjK4TeN8ow-JSXbWpqyh9C1vFeKnEsYHPg1x564KU,19969
68
- nextrec/utils/console.py,sha256=e94SiwA0gKn2pfpP94mY_jl-kFok3TCjxo298KdFuP4,11696
68
+ nextrec/utils/config.py,sha256=VgCh5fto8HGodwXPJacenqjxre3Aw6tw-mntW9n3OYA,20044
69
+ nextrec/utils/console.py,sha256=wHAGQT0HmPumWlMlqQuBVXPVGCWubgYez-vuzkOMiMQ,13646
69
70
  nextrec/utils/data.py,sha256=alruiWZFbmwy3kO12q42VXmtHmXFFjVULpHa43fx_mI,21098
70
71
  nextrec/utils/embedding.py,sha256=akAEc062MG2cD7VIOllHaqtwzAirQR2gq5iW7oKpGAU,1449
71
72
  nextrec/utils/feature.py,sha256=rsUAv3ELyDpehVw8nPEEsLCCIjuKGTJJZuFaWB_wrPk,633
72
73
  nextrec/utils/model.py,sha256=3B85a0IJCggI26dxv25IX8R_5yQPo7wXI0JIAns6bkQ,1727
73
74
  nextrec/utils/torch_utils.py,sha256=AKfYbSOJjEw874xsDB5IO3Ote4X7vnqzt_E0jJny0o8,13468
74
- nextrec-0.4.15.dist-info/METADATA,sha256=fNo7--haI7VdwCBY-FXqtrDXTK6pVrkMde-opJJsLjg,21298
75
- nextrec-0.4.15.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
76
- nextrec-0.4.15.dist-info/entry_points.txt,sha256=NN-dNSdfMRTv86bNXM7d3ZEPW2BQC6bRi7QP7i9cIps,45
77
- nextrec-0.4.15.dist-info/licenses/LICENSE,sha256=2fQfVKeafywkni7MYHyClC6RGGC3laLTXCNBx-ubtp0,1064
78
- nextrec-0.4.15.dist-info/RECORD,,
75
+ nextrec-0.4.17.dist-info/METADATA,sha256=X-ezN2q05f0SKvwE80Y8ThecPGQI1D7RJCl6FEDXyjA,21298
76
+ nextrec-0.4.17.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
77
+ nextrec-0.4.17.dist-info/entry_points.txt,sha256=NN-dNSdfMRTv86bNXM7d3ZEPW2BQC6bRi7QP7i9cIps,45
78
+ nextrec-0.4.17.dist-info/licenses/LICENSE,sha256=2fQfVKeafywkni7MYHyClC6RGGC3laLTXCNBx-ubtp0,1064
79
+ nextrec-0.4.17.dist-info/RECORD,,