nextrec 0.2.3__py3-none-any.whl → 0.2.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,20 +1,142 @@
1
1
  """
2
2
  Date: create on 09/11/2025
3
- Author:
4
- Yang Zhou,zyaztec@gmail.com
3
+ Checkpoint: edit on 24/11/2025
4
+ Author: Yang Zhou, zyaztec@gmail.com
5
5
  Reference:
6
- [1] Pan Z, Sun F, Liu J, et al. MaskNet: Introducing feature-wise gating blocks for high-dimensional
7
- sparse recommendation data (CCF-Tencent CTR competition solution, 2020).
6
+ [1] Wang Z, She Q, Zhang J. MaskNet: Introducing Feature-Wise
7
+ Multiplication to CTR Ranking Models by Instance-Guided Mask.
8
+
9
+ MaskNet is a CTR prediction model that introduces instance-guided,
10
+ feature-wise multiplicative interactions into deep ranking networks.
11
+ Instead of relying solely on additive feature interactions from MLPs,
12
+ MaskNet generates a personalized “mask” vector for each instance based
13
+ on its embedding representation. This mask selectively scales hidden
14
+ features through element-wise multiplication, enabling the network to
15
+ emphasize informative dimensions and suppress irrelevant noise.
16
+
17
+ Each MaskBlock consists of:
18
+ (1) Instance-Guided Mask Generation (two-layer MLP)
19
+ (2) Feature-wise Multiplication with hidden representations
20
+ (3) Layer Normalization and nonlinear transformation
21
+
22
+ By stacking (SerialMaskNet) or parallelizing (ParallelMaskNet) multiple
23
+ MaskBlocks, MaskNet enhances expressive power while remaining efficient,
24
+ improving CTR performance without heavy feature engineering.
25
+
26
+ Key Advantages:
27
+ - Learns higher-order interactions via multiplicative gating
28
+ - Instance-adaptive feature importance modulation
29
+ - Better discrimination of informative vs. noisy dimensions
30
+ - Flexible architecture for both sequential and parallel design
31
+
32
+ MaskNet 是一种用于 CTR 预估的模型,它在深度排序网络中引入了
33
+ 基于实例(Instance-Guided)的特征级逐元素(Feature-wise)
34
+ 乘法交互机制。
35
+
36
+ 与传统仅依赖 MLP 的加性特征交互不同,MaskNet 会根据每个样本的
37
+ embedding 表示生成一个个性化的 “mask” 向量,通过逐元素的乘法
38
+ 选择性地放大有效特征维度、抑制无关或噪声特征。
39
+
40
+ 每个 MaskBlock 包含以下关键步骤:
41
+ (1) 基于当前样本 embedding 的双层 MLP Mask 生成
42
+ (2) Mask 与隐藏表示之间的逐元素乘法交互
43
+ (3) Layer Normalization 与非线性变换
44
+
45
+ 通过串联(SerialMaskNet)或并联(ParallelMaskNet)
46
+ 多个 MaskBlock,MaskNet 在保持高效的同时显著增强了特征表达能力,
47
+ 在无需大量特征工程的情况下提升 CTR 模型性能。
48
+
49
+ 核心优势:
50
+ - 通过乘法门控学习高阶特征交互关系
51
+ - 针对每个样本自适应调整特征重要性
52
+ - 有效区分信息特征与噪声特征
53
+ - 支持灵活的串行与并行网络结构设计
8
54
  """
9
55
 
10
56
  import torch
11
57
  import torch.nn as nn
58
+ import torch.nn.functional as F
12
59
 
13
60
  from nextrec.basic.model import BaseModel
14
- from nextrec.basic.layers import EmbeddingLayer, LR, MLP, PredictionLayer
61
+ from nextrec.basic.layers import EmbeddingLayer, MLP, PredictionLayer
15
62
  from nextrec.basic.features import DenseFeature, SparseFeature, SequenceFeature
16
63
 
17
64
 
65
class InstanceGuidedMask(nn.Module):
    """Two-layer MLP that produces an instance-guided mask vector.

    Maps the flattened field embeddings of one sample to a mask of size
    ``output_dim`` via Linear -> ReLU -> Linear (no activation on the
    output, so the mask values are unbounded reals).
    """

    def __init__(self, input_dim: int, hidden_dim: int, output_dim: int):
        super().__init__()
        self.fc1 = nn.Linear(input_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, output_dim)

    def forward(self, v_emb_flat: torch.Tensor) -> torch.Tensor:
        """Return the mask for ``v_emb_flat`` of shape [batch, num_fields * embedding_dim]."""
        hidden = F.relu(self.fc1(v_emb_flat))
        return self.fc2(hidden)
+ return v_mask
77
+
78
class MaskBlockOnEmbedding(nn.Module):
    """MaskBlock whose input is the raw field embeddings.

    Pipeline: LayerNorm over each field embedding -> element-wise multiply
    with an instance-guided mask (generated from the flattened embeddings)
    -> Linear projection -> LayerNorm -> ReLU.
    """

    def __init__(
        self,
        num_fields: int,
        embedding_dim: int,
        mask_hidden_dim: int,
        hidden_dim: int,
    ):
        super().__init__()
        self.num_fields = num_fields
        self.embedding_dim = embedding_dim
        # Width of the flattened embedding input: num_fields * embedding_dim.
        self.input_dim = num_fields * embedding_dim
        self.ln_emb = nn.LayerNorm(embedding_dim)
        self.mask_gen = InstanceGuidedMask(
            input_dim=self.input_dim,
            hidden_dim=mask_hidden_dim,
            output_dim=self.input_dim,
        )
        self.ffn = nn.Linear(self.input_dim, hidden_dim)
        self.ln_hid = nn.LayerNorm(hidden_dim)

    # Unlike MaskBlockOnHidden, the masked tensor here is the embeddings.
    def forward(self, field_emb: torch.Tensor, v_emb_flat: torch.Tensor) -> torch.Tensor:
        """Apply the mask block.

        Args:
            field_emb: [batch, num_fields, embedding_dim] field embeddings.
            v_emb_flat: [batch, num_fields * embedding_dim] flattened
                embeddings that drive mask generation.

        Returns:
            Hidden representation of shape [batch, hidden_dim].
        """
        batch_size = field_emb.size(0)
        normed_flat = self.ln_emb(field_emb).view(batch_size, -1)
        masked = self.mask_gen(v_emb_flat) * normed_flat
        return F.relu(self.ln_hid(self.ffn(masked)))
107
+
108
+
109
class MaskBlockOnHidden(nn.Module):
    """MaskBlock whose input is the hidden output of a previous block.

    Pipeline: LayerNorm over the incoming hidden state -> element-wise
    multiply with an instance-guided mask (always generated from the raw
    flattened embeddings) -> Linear -> LayerNorm -> ReLU.
    """

    def __init__(
        self,
        num_fields: int,
        embedding_dim: int,
        mask_hidden_dim: int,
        hidden_dim: int,
    ):
        super().__init__()
        self.num_fields = num_fields
        self.embedding_dim = embedding_dim
        self.hidden_dim = hidden_dim
        # Mask generation conditions on the flattened embeddings, not the hidden state.
        self.v_emb_dim = num_fields * embedding_dim

        self.ln_input = nn.LayerNorm(hidden_dim)
        self.ln_output = nn.LayerNorm(hidden_dim)

        self.mask_gen = InstanceGuidedMask(
            input_dim=self.v_emb_dim,
            hidden_dim=mask_hidden_dim,
            output_dim=hidden_dim,
        )
        self.ffn = nn.Linear(hidden_dim, hidden_dim)

    # Unlike MaskBlockOnEmbedding, the masked tensor here is the hidden state.
    def forward(self, hidden_in: torch.Tensor, v_emb_flat: torch.Tensor) -> torch.Tensor:
        """Apply the mask block.

        Args:
            hidden_in: [batch, hidden_dim] output of the previous block.
            v_emb_flat: [batch, num_fields * embedding_dim] flattened
                embeddings used to generate the mask.

        Returns:
            Hidden representation of shape [batch, hidden_dim].
        """
        normed = self.ln_input(hidden_in)
        masked = self.mask_gen(v_emb_flat) * normed
        return F.relu(self.ln_output(self.ffn(masked)))
138
+
139
+
18
140
  class MaskNet(BaseModel):
19
141
  @property
20
142
  def model_name(self):
@@ -22,29 +144,39 @@ class MaskNet(BaseModel):
22
144
 
23
145
  @property
24
146
  def task_type(self):
25
- return "binary"
26
-
27
- def __init__(self,
28
- dense_features: list[DenseFeature] | list = [],
29
- sparse_features: list[SparseFeature] | list = [],
30
- sequence_features: list[SequenceFeature] | list = [],
31
- num_blocks: int = 3,
32
- mask_hidden_dim: int = 64,
33
- block_dropout: float = 0.1,
34
- mlp_params: dict = {},
35
- target: list[str] | list = [],
36
- optimizer: str = "adam",
37
- optimizer_params: dict = {},
38
- loss: str | nn.Module | None = "bce",
39
- loss_params: dict | list[dict] | None = None,
40
- device: str = 'cpu',
41
- embedding_l1_reg=1e-6,
42
- dense_l1_reg=1e-5,
43
- embedding_l2_reg=1e-5,
44
- dense_l2_reg=1e-4,
45
- **kwargs):
46
-
47
- super(MaskNet, self).__init__(
147
+ return "binary_classification"
148
+
149
+ def __init__(
150
+ self,
151
+ dense_features: list[DenseFeature] | None = None,
152
+ sparse_features: list[SparseFeature] | None = None,
153
+ sequence_features: list[SequenceFeature] | None = None,
154
+ model_type: str = "parallel", # "serial" or "parallel"
155
+ num_blocks: int = 3,
156
+ mask_hidden_dim: int = 64,
157
+ block_hidden_dim: int = 256,
158
+ block_dropout: float = 0.0,
159
+ mlp_params: dict | None = None,
160
+ target: list[str] | None = None,
161
+ optimizer: str = "adam",
162
+ optimizer_params: dict | None = None,
163
+ loss: str | nn.Module | None = "bce",
164
+ loss_params: dict | list[dict] | None = None,
165
+ device: str = "cpu",
166
+ embedding_l1_reg: float = 1e-6,
167
+ dense_l1_reg: float = 1e-5,
168
+ embedding_l2_reg: float = 1e-5,
169
+ dense_l2_reg: float = 1e-4,
170
+ **kwargs,
171
+ ):
172
+ dense_features = dense_features or []
173
+ sparse_features = sparse_features or []
174
+ sequence_features = sequence_features or []
175
+ target = target or []
176
+ mlp_params = mlp_params or {}
177
+ optimizer_params = optimizer_params or {}
178
+
179
+ super().__init__(
48
180
  dense_features=dense_features,
49
181
  sparse_features=sparse_features,
50
182
  sequence_features=sequence_features,
@@ -56,45 +188,99 @@ class MaskNet(BaseModel):
56
188
  embedding_l2_reg=embedding_l2_reg,
57
189
  dense_l2_reg=dense_l2_reg,
58
190
  early_stop_patience=20,
59
- **kwargs
191
+ **kwargs,
60
192
  )
61
193
 
194
+ if loss is None:
195
+ loss = "bce"
62
196
  self.loss = loss
63
- if self.loss is None:
64
- self.loss = "bce"
65
-
66
- self.mask_features = sparse_features + sequence_features
67
- if len(self.mask_features) == 0:
68
- raise ValueError("MaskNet requires at least one sparse/sequence feature.")
197
+
198
+ self.dense_features = dense_features
199
+ self.sparse_features = sparse_features
200
+ self.sequence_features = sequence_features
201
+ self.mask_features = self.sparse_features + self.sequence_features + self.dense_features
202
+ assert len(self.mask_features) > 0, "MaskNet requires at least one feature for masking."
69
203
 
70
204
  self.embedding = EmbeddingLayer(features=self.mask_features)
205
+
71
206
  self.num_fields = len(self.mask_features)
72
- self.embedding_dim = self.mask_features[0].embedding_dim
73
- if any(f.embedding_dim != self.embedding_dim for f in self.mask_features):
74
- raise ValueError("MaskNet expects identical embedding_dim across mask_features.")
75
207
 
76
- self.num_blocks = max(1, num_blocks)
77
- self.field_dim = self.num_fields * self.embedding_dim
78
-
79
- self.linear = LR(self.field_dim)
80
- self.mask_generators = nn.ModuleList()
81
- for _ in range(self.num_blocks):
82
- self.mask_generators.append(
83
- nn.Sequential(
84
- nn.Linear(self.field_dim, mask_hidden_dim),
85
- nn.ReLU(),
86
- nn.Linear(mask_hidden_dim, self.num_fields)
208
+ self.embedding_dim = getattr(self.mask_features[0], "embedding_dim", None)
209
+ assert self.embedding_dim is not None, "MaskNet requires mask_features to have 'embedding_dim' defined."
210
+
211
+ for f in self.mask_features:
212
+ edim = getattr(f, "embedding_dim", None)
213
+ if edim is None or edim != self.embedding_dim:
214
+ raise ValueError(
215
+ f"MaskNet expects identical embedding_dim across all mask_features, "
216
+ f"but got {edim} for feature {getattr(f, 'name', type(f))}."
87
217
  )
218
+
219
+ self.v_emb_dim = self.num_fields * self.embedding_dim
220
+
221
+ self.model_type = model_type.lower()
222
+ assert self.model_type in ("serial", "parallel"), "model_type must be either 'serial' or 'parallel'."
223
+
224
+ self.num_blocks = max(1, num_blocks)
225
+ self.block_hidden_dim = block_hidden_dim
226
+ self.block_dropout = nn.Dropout(block_dropout) if block_dropout > 0 else nn.Identity()
227
+
228
+ if self.model_type == "serial":
229
+ self.first_block = MaskBlockOnEmbedding(
230
+ num_fields=self.num_fields,
231
+ embedding_dim=self.embedding_dim,
232
+ mask_hidden_dim=mask_hidden_dim,
233
+ hidden_dim=block_hidden_dim,
234
+ )
235
+
236
+ self.hidden_blocks = nn.ModuleList(
237
+ [
238
+ MaskBlockOnHidden(
239
+ num_fields=self.num_fields,
240
+ embedding_dim=self.embedding_dim,
241
+ mask_hidden_dim=mask_hidden_dim,
242
+ hidden_dim=block_hidden_dim,
243
+ )
244
+ for _ in range(self.num_blocks - 1)
245
+ ]
246
+ )
247
+
248
+ self.mask_blocks = nn.ModuleList([self.first_block, *self.hidden_blocks])
249
+ self.output_layer = nn.Linear(block_hidden_dim, 1)
250
+ self.final_mlp = None
251
+
252
+ else: # parallel
253
+ self.mask_blocks = nn.ModuleList(
254
+ [
255
+ MaskBlockOnEmbedding(
256
+ num_fields=self.num_fields,
257
+ embedding_dim=self.embedding_dim,
258
+ mask_hidden_dim=mask_hidden_dim,
259
+ hidden_dim=block_hidden_dim,
260
+ )
261
+ for _ in range(self.num_blocks)
262
+ ]
88
263
  )
89
264
 
90
- self.block_dropout = nn.Dropout(block_dropout)
91
- self.final_mlp = MLP(input_dim=self.field_dim * self.num_blocks, **mlp_params)
265
+ self.final_mlp = MLP(
266
+ input_dim=self.num_blocks * block_hidden_dim,
267
+ **mlp_params,
268
+ )
269
+ self.output_layer = None
270
+
92
271
  self.prediction_layer = PredictionLayer(task_type=self.task_type)
93
272
 
94
- self._register_regularization_weights(
95
- embedding_attr='embedding',
96
- include_modules=['linear', 'mask_generators', 'final_mlp']
97
- )
273
+ if self.model_type == "serial":
274
+ self._register_regularization_weights(
275
+ embedding_attr="embedding",
276
+ include_modules=["mask_blocks", "output_layer"],
277
+ )
278
+ # parallel
279
+ else:
280
+ self._register_regularization_weights(
281
+ embedding_attr="embedding",
282
+ include_modules=["mask_blocks", "final_mlp"],
283
+ )
98
284
 
99
285
  self.compile(
100
286
  optimizer=optimizer,
@@ -103,25 +289,31 @@ class MaskNet(BaseModel):
103
289
  loss_params=loss_params,
104
290
  )
105
291
 
106
- def forward(self, x):
292
+ def forward(self, x: dict[str, torch.Tensor]) -> torch.Tensor:
107
293
  field_emb = self.embedding(x=x, features=self.mask_features, squeeze_dim=False)
108
- flat_input = field_emb.flatten(start_dim=1)
109
- y_linear = self.linear(flat_input)
110
-
111
- block_input = field_emb
112
- mask_input = flat_input
113
- block_outputs = []
114
- for mask_gen in self.mask_generators:
115
- mask_logits = mask_gen(mask_input)
116
- mask = torch.sigmoid(mask_logits).unsqueeze(-1)
117
- masked_emb = block_input * mask
118
- block_output = self.block_dropout(masked_emb.flatten(start_dim=1))
119
- block_outputs.append(block_output)
120
- mask_input = block_output
121
- block_input = masked_emb.view_as(field_emb)
122
-
123
- stacked = torch.cat(block_outputs, dim=1)
124
- y_deep = self.final_mlp(stacked)
125
-
126
- y = y_linear + y_deep
127
- return self.prediction_layer(y)
294
+ B = field_emb.size(0)
295
+ v_emb_flat = field_emb.view(B, -1) # flattened embeddings
296
+
297
+ if self.model_type == "parallel":
298
+ block_outputs = []
299
+ for block in self.mask_blocks:
300
+ h = block(field_emb, v_emb_flat) # [B, block_hidden_dim]
301
+ h = self.block_dropout(h)
302
+ block_outputs.append(h)
303
+
304
+ concat_hidden = torch.cat(block_outputs, dim=-1)
305
+ logit = self.final_mlp(concat_hidden) # [B, 1]
306
+
307
+ # serial
308
+ else:
309
+ hidden = self.first_block(field_emb, v_emb_flat)
310
+ hidden = self.block_dropout(hidden)
311
+
312
+ for block in self.hidden_blocks:
313
+ hidden = block(hidden, v_emb_flat)
314
+ hidden = self.block_dropout(hidden)
315
+
316
+ logit = self.output_layer(hidden) # [B, 1]
317
+
318
+ y = self.prediction_layer(logit)
319
+ return y
nextrec/utils/__init__.py CHANGED
@@ -1,14 +1,17 @@
1
1
  from .optimizer import get_optimizer, get_scheduler
2
2
  from .initializer import get_initializer
3
3
  from .embedding import get_auto_embedding_dim
4
- from . import optimizer, initializer, embedding
4
+ from .common import resolve_device
5
+ from . import optimizer, initializer, embedding, common
5
6
 
6
7
  __all__ = [
7
8
  'get_optimizer',
8
9
  'get_scheduler',
9
10
  'get_initializer',
10
11
  'get_auto_embedding_dim',
12
+ 'resolve_device',
11
13
  'optimizer',
12
14
  'initializer',
13
15
  'embedding',
16
+ 'common',
14
17
  ]
@@ -0,0 +1,16 @@
1
+ import torch
2
+ import platform
3
+
4
def resolve_device() -> str:
    """Select a usable torch device string with graceful fallback.

    Preference order: CUDA, then Apple MPS (only on macOS 14 or newer,
    where the backend is considered reliable), then CPU.

    Returns:
        One of "cuda", "mps", or "cpu".
    """
    if torch.cuda.is_available():
        return "cuda"
    # torch.backends.mps only exists on torch >= 1.12; the package supports
    # PyTorch 1.10+, so guard the attribute access instead of assuming it.
    mps_backend = getattr(torch.backends, "mps", None)
    if mps_backend is not None and mps_backend.is_available():
        mac_ver = platform.mac_ver()[0]
        try:
            # Only the major version matters for the >= 14 gate; handles
            # single-component strings like "14" that would break unpacking.
            major = int(mac_ver.split(".")[0])
        except (ValueError, IndexError):
            major = 0
        if major >= 14:
            return "mps"
    return "cpu"
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: nextrec
3
- Version: 0.2.3
3
+ Version: 0.2.5
4
4
  Summary: A comprehensive recommendation library with match, ranking, and multi-task learning models
5
5
  Project-URL: Homepage, https://github.com/zerolovesea/NextRec
6
6
  Project-URL: Repository, https://github.com/zerolovesea/NextRec
@@ -61,7 +61,7 @@ Description-Content-Type: text/markdown
61
61
  ![Python](https://img.shields.io/badge/Python-3.10+-blue.svg)
62
62
  ![PyTorch](https://img.shields.io/badge/PyTorch-1.10+-ee4c2c.svg)
63
63
  ![License](https://img.shields.io/badge/License-Apache%202.0-green.svg)
64
- ![Version](https://img.shields.io/badge/Version-0.2.3-orange.svg)
64
+ ![Version](https://img.shields.io/badge/Version-0.2.5-orange.svg)
65
65
 
66
66
  English | [中文版](README_zh.md)
67
67
 
@@ -1,21 +1,21 @@
1
1
  nextrec/__init__.py,sha256=CvocnY2uBp0cjNkhrT6ogw0q2bN9s1GNp754FLO-7lo,1117
2
- nextrec/__version__.py,sha256=PNiDER4qM19h9zdsdfgKt2_dT4WgYK7EguJ8RU2qA_g,22
2
+ nextrec/__version__.py,sha256=Xsa3ayOMVkhUWm4t06YeyHE0apjpZefxLH4ylp0CDtU,22
3
3
  nextrec/basic/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
4
4
  nextrec/basic/activation.py,sha256=9EfYmwE0brTSKwx_0FIGQ_rybFBT9n_G-UWA7NAhMsI,2804
5
5
  nextrec/basic/callback.py,sha256=qkq3k8rP0g4BW2C3FSCdVt_CyCcJwJ-rUXjhT2p4LP8,1035
6
- nextrec/basic/features.py,sha256=TJfJgzWuy68lBKOeCzztcUK3ZtjHhK8oSMs8k0vXGlg,3961
7
- nextrec/basic/layers.py,sha256=mDNApSlPkmPSnIPj3BDHfDEjviLybWuSGrh61Zog2uk,38290
6
+ nextrec/basic/features.py,sha256=pQyqs-hGFN8T6goTxIjaXxw9S5PG57OWX7EpsKFlb4c,4194
7
+ nextrec/basic/layers.py,sha256=AezH_WYvU45eLw5EHmyAC69MCUkRTv_ZKbNW4WBC5iE,38071
8
8
  nextrec/basic/loggers.py,sha256=x8lzyyK-uqBN5XGOm1Cb33dmfc2bl114n6QeFTtE54k,3752
9
9
  nextrec/basic/metrics.py,sha256=w8tGe2tTbBNz9A1TNZF3jSpxcNC6QvFP5I0lWRd0Nw4,20398
10
- nextrec/basic/model.py,sha256=mtVttDWmrhdW-L1PAelJ90a1BW0q6bzG9roMvrPTU0U,66342
10
+ nextrec/basic/model.py,sha256=k9dbV4CP-1wvr-QLJ0dF6nYtGugXQIFR6J8kZDS9iSs,63968
11
11
  nextrec/basic/session.py,sha256=2kogEjgKAN1_ygelbwoqOs187BAcUnDTqXG1w_Pgb9I,4791
12
12
  nextrec/data/__init__.py,sha256=HLnARJrqDEVPTcofPSAEimy2Oj15vbomj-7UvT4ze_4,767
13
- nextrec/data/data_utils.py,sha256=vGZ378YM_JQXO9npRB7JqojJx1ovjbJCWI-7lQJkicA,6298
14
- nextrec/data/dataloader.py,sha256=LAKpcSHhq53scq8PKwF8uqxa8wQLG0FshjY3TQwIvBU,20459
15
- nextrec/data/preprocessor.py,sha256=N7m4PYGZE6AND0XyYRvXKYAUub9aHGb1qmxbBRxlZKA,42294
16
- nextrec/loss/__init__.py,sha256=t-wkqxcu5wdYlrb67-CxX9aOGom0CpMJK8Fe8KGDSEE,857
13
+ nextrec/data/data_utils.py,sha256=xz0xVBA7UzHXz7r_Yf0eMB5RrarPKg_1ZTdWvAqRZCM,7623
14
+ nextrec/data/dataloader.py,sha256=vtgt2B7rUmIG7wg-HE2ZesBaD6cuS2PwklFCWGA9tCw,14142
15
+ nextrec/data/preprocessor.py,sha256=J-3fo_LIz100spqCHoSpewYcneiZwhaCKyRdroPSjeY,41548
16
+ nextrec/loss/__init__.py,sha256=mO5t417BneZ8Ysa51GyjDaffjWyjzFgPXIQrrggasaQ,827
17
17
  nextrec/loss/listwise.py,sha256=LcYIPf6PGRtjV_AoWaAyp3rse904S2MghE5t032I07I,5628
18
- nextrec/loss/loss_utils.py,sha256=LnTkpMTS2bhbq4Lsjf3AUn1uBaOg1TaH5VO2R8hwARc,5324
18
+ nextrec/loss/loss_utils.py,sha256=cFNSvv-eaFwcfjLgxN3yNmf0L7ofC0ysgkUYjliLBpE,2535
19
19
  nextrec/loss/pairwise.py,sha256=RuQuTE-EkLaHQvT9m0CTAXxneTnVQLF1Pi9wblEClI8,3289
20
20
  nextrec/loss/pointwise.py,sha256=6QveizdohzQTxAoBKTVSoCBpp-fy3JC8vCjImXa7jL0,7157
21
21
  nextrec/models/generative/hstu.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -26,28 +26,29 @@ nextrec/models/match/dssm_v2.py,sha256=ywtqTy3YN9ke_7kzcDp7Fhtldw9RJz6yfewxALJb6
26
26
  nextrec/models/match/mind.py,sha256=XSUDlZ-V95JXHHBDUl5sz99SaVuQKDvf3TArVjwUexs,9417
27
27
  nextrec/models/match/sdm.py,sha256=96yfMQ6arP6JRhAkDTGEjlBiTteznMykrDV_3jqvvVk,10920
28
28
  nextrec/models/match/youtube_dnn.py,sha256=pnrz9LYu65Fj4neOriFF45B5k2-yYiiREtQICxxYXZ0,7546
29
- nextrec/models/multi_task/esmm.py,sha256=E9B6TlpnPUeyldTofyFg4B7SKByyxbiW2fUGHLOryO4,4883
29
+ nextrec/models/multi_task/esmm.py,sha256=U9DkYxwAhD_uWB8H2tsx1zfIStp5Xp8bvM6sc8S9tu4,4889
30
30
  nextrec/models/multi_task/mmoe.py,sha256=zhQr43Vfz7Kgi6B9pKPmaenp_38a_D7w4VvlpwCyF6Y,6165
31
31
  nextrec/models/multi_task/ple.py,sha256=otP6oLgzrJhwkLFItzNE-AtIPouObDkafRvWzTCxfNo,11335
32
32
  nextrec/models/multi_task/share_bottom.py,sha256=LL5HBVlvvBzHV2fLBRQMGIwpqmlxILTgU4c51XyTCo4,4517
33
33
  nextrec/models/ranking/__init__.py,sha256=-qe34zQEVwmxeTPGYCa6gbql9quT8DwB7-buHfA7Iig,428
34
34
  nextrec/models/ranking/afm.py,sha256=r9m1nEnc0m5d4pMtOxRMqOaXaBNCEkjJBFB-5wSHeFA,4540
35
- nextrec/models/ranking/autoint.py,sha256=GYzRynjn6Csq4b3qYIFWxLQ4Yl57_OQBeF2IY0Zhr9Q,5654
35
+ nextrec/models/ranking/autoint.py,sha256=TyA45mnXP7pZHhY6AbyK84qLpniGFXst7v7E_RlYiZM,7754
36
36
  nextrec/models/ranking/dcn.py,sha256=dUV5GbHypBGc9vVozk6aGYfIXq23c0deX-HFnIhZueg,4208
37
37
  nextrec/models/ranking/deepfm.py,sha256=y28yJxF__TZR3O1G2ufKZVtBRLgCgmlXWqvPgLzwm3U,3510
38
38
  nextrec/models/ranking/dien.py,sha256=E6s9TDwQfGSwtzzh8hG2F5gwgVxzVZPcptYvHLNzOLA,8475
39
39
  nextrec/models/ranking/din.py,sha256=j5tkT5k91CbsMlMr5vJOySrcY2_rFGxmEgJJ0McW7-Q,7196
40
40
  nextrec/models/ranking/fibinet.py,sha256=X6CbQbritvq5jql_Tvs4bn_tRla2zpWPplftZv8k6f0,4853
41
41
  nextrec/models/ranking/fm.py,sha256=3Qx_Fgowegr6UPQtEeTmHtOrbWzkvqH94ZTjOqRLu-E,2961
42
- nextrec/models/ranking/masknet.py,sha256=Bu0mZl2vKqcGnqCuUjPHjPRd1f-cDTeVwFj8Y_6v3C8,4639
42
+ nextrec/models/ranking/masknet.py,sha256=Tx5deIv7oShm4DdXX1IJL8Hz8-5uGqcPMK7pj00xTHg,12230
43
43
  nextrec/models/ranking/pnn.py,sha256=5RxIKdxD0XcGq-b_QDdwGRwk6b_5BQjyMvCw3Ibv2Kk,4957
44
44
  nextrec/models/ranking/widedeep.py,sha256=b6ctElaZPv5WSYDA4piYUBo3je0eJpWpWECwcuWavM4,3716
45
45
  nextrec/models/ranking/xdeepfm.py,sha256=I00J5tfE4tPluqeW-qrNtE4V_9fC7-rgFvA0Fxqka7o,4274
46
- nextrec/utils/__init__.py,sha256=6x3OZbqks2gtgJd00y_-Y8QiAT42x5t14ARHQ-ULQDo,350
46
+ nextrec/utils/__init__.py,sha256=A3mH6M-DmDBWQ1stIIaTsNzvUy_AKaUWtRmrzU5R3FE,429
47
+ nextrec/utils/common.py,sha256=-n4wSbP-EptpzLcJv6fV-ytBzPliOj6m-mrK_Qk6s4A,458
47
48
  nextrec/utils/embedding.py,sha256=yxYSdFx0cJITh3Gf-K4SdhwRtKGcI0jOsyBgZ0NLa_c,465
48
49
  nextrec/utils/initializer.py,sha256=ffYOs5QuIns_d_-5e40iNtg6s1ftgREJN-ueq_NbDQE,1647
49
50
  nextrec/utils/optimizer.py,sha256=85ifoy2IQgjPHOqLqr1ho7XBGE_0ry1yEB9efS6C2lM,2446
50
- nextrec-0.2.3.dist-info/METADATA,sha256=SpkwkLdg4MQLeysCCr_0fJXak-Vo139Cz3HFLXR-4Z0,11425
51
- nextrec-0.2.3.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
52
- nextrec-0.2.3.dist-info/licenses/LICENSE,sha256=2fQfVKeafywkni7MYHyClC6RGGC3laLTXCNBx-ubtp0,1064
53
- nextrec-0.2.3.dist-info/RECORD,,
51
+ nextrec-0.2.5.dist-info/METADATA,sha256=Ya8KTj9x1ozIaciXYTKnTFBLGiC4buUBIz-jVHHAM3s,11425
52
+ nextrec-0.2.5.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
53
+ nextrec-0.2.5.dist-info/licenses/LICENSE,sha256=2fQfVKeafywkni7MYHyClC6RGGC3laLTXCNBx-ubtp0,1064
54
+ nextrec-0.2.5.dist-info/RECORD,,