nextrec-0.2.3-py3-none-any.whl → nextrec-0.2.4-py3-none-any.whl

nextrec/__version__.py CHANGED
@@ -1 +1 @@
- __version__ = "0.2.3"
+ __version__ = "0.2.4"
nextrec/models/ranking/autoint.py CHANGED
@@ -1,12 +1,57 @@
  """
  Date: create on 09/11/2025
- Author:
- Yang Zhou,zyaztec@gmail.com
+ Checkpoint: edit on 24/11/2025
+ Author: Yang Zhou, zyaztec@gmail.com
  Reference:
- [1] Song W, Shi C, Xiao Z, et al. Autoint: Automatic feature interaction learning via
- self-attentive neural networks[C]//Proceedings of the 28th ACM international conference
- on information and knowledge management. 2019: 1161-1170.
- (https://arxiv.org/abs/1810.11921)
+ [1] Song W, Shi C, Xiao Z, et al. Autoint: Automatic feature interaction learning via
+ self-attentive neural networks[C]//Proceedings of the 28th ACM international conference
+ on information and knowledge management. 2019: 1161-1170.
+ (https://arxiv.org/abs/1810.11921)
+
+ AutoInt is a CTR prediction model that leverages multi-head self-attention
+ to automatically learn high-order feature interactions in an explicit and
+ interpretable way. Instead of relying on manual feature engineering or
+ implicit MLP-based transformations, AutoInt models feature dependencies
+ by attending over all embedded fields and capturing their contextual
+ relationships.
+
+ In each Interacting Layer:
+ (1) Each field embedding is projected into multiple attention heads
+ (2) Scaled dot-product attention computes feature-to-feature interactions
+ (3) Outputs are aggregated and passed through residual connections
+ (4) Layer Normalization ensures stable optimization
+
+ By stacking multiple Interacting Layers, AutoInt progressively discovers
+ higher-order feature interactions, while maintaining transparency since
+ attention weights explicitly show which features interact.
+
+ Key Advantages:
+ - Explicit modeling of high-order feature interactions
+ - Multi-head attention enhances representation diversity
+ - Residual structure facilitates deep interaction learning
+ - Attention weights provide interpretability of feature relations
+ - Eliminates heavy manual feature engineering
+
+ AutoInt 是一个 CTR 预估模型,通过多头自注意力机制显式学习高阶特征交互,
+ 并具有良好的可解释性。不同于依赖人工特征工程或 MLP 隐式建模的方法,
+ AutoInt 通过对所有特征 embedding 进行注意力计算,捕捉特征之间的上下文依赖关系。
+
+ 在每个 Interacting Layer(交互层)中:
+ (1) 每个特征 embedding 通过投影分成多个注意力头
+ (2) 使用缩放点积注意力计算特征间交互权重
+ (3) 将多头输出进行聚合,并使用残差连接
+ (4) Layer Normalization 确保训练稳定性
+
+ 通过堆叠多个交互层,AutoInt 能逐步学习更高阶的特征交互;
+ 同时由于注意力权重可视化,模型具有明确的可解释能力,
+ 能展示哪些特征之间的关系最重要。
+
+ 主要优点:
+ - 显式建模高阶特征交互
+ - 多头机制增强表示能力
+ - 残差结构支持深层交互学习
+ - 注意力权重天然具备可解释性
+ - 减少繁重的人工特征工程工作
  """
 
  import torch
@@ -80,7 +125,6 @@ class AutoInt(BaseModel):
 
          # Project embeddings to attention embedding dimension
          num_fields = len(self.interaction_features)
-         total_embedding_dim = sum([f.embedding_dim for f in self.interaction_features])
 
          # If embeddings have different dimensions, project them to att_embedding_dim
          self.need_projection = not all(f.embedding_dim == att_embedding_dim for f in self.interaction_features)
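
The Interacting Layer enumerated in the docstring above is compact enough to sketch end to end. The PyTorch block below is an illustrative re-derivation of steps (1)-(4), not the code shipped in autoint.py: the class name InteractingLayer, the default head count, and the residual projection w_res are assumptions made for the sketch.

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class InteractingLayer(nn.Module):
        # One AutoInt-style interacting layer: multi-head self-attention over
        # the field axis, with a residual connection and LayerNorm.
        def __init__(self, embedding_dim: int, num_heads: int = 2):
            super().__init__()
            assert embedding_dim % num_heads == 0
            self.num_heads = num_heads
            self.head_dim = embedding_dim // num_heads
            self.w_q = nn.Linear(embedding_dim, embedding_dim, bias=False)
            self.w_k = nn.Linear(embedding_dim, embedding_dim, bias=False)
            self.w_v = nn.Linear(embedding_dim, embedding_dim, bias=False)
            self.w_res = nn.Linear(embedding_dim, embedding_dim, bias=False)
            self.ln = nn.LayerNorm(embedding_dim)

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            # x: [batch, num_fields, embedding_dim]
            b, f, d = x.shape
            # (1) project each field embedding and split into attention heads
            def heads(t: torch.Tensor) -> torch.Tensor:
                return t.view(b, f, self.num_heads, self.head_dim).transpose(1, 2)
            q, k, v = heads(self.w_q(x)), heads(self.w_k(x)), heads(self.w_v(x))
            # (2) scaled dot-product attention = feature-to-feature interaction weights
            attn = torch.softmax(q @ k.transpose(-2, -1) / self.head_dim ** 0.5, dim=-1)
            # (3) aggregate heads and add the projected residual
            out = (attn @ v).transpose(1, 2).reshape(b, f, d) + self.w_res(x)
            # (4) LayerNorm for stable optimization
            return self.ln(F.relu(out))

    # e.g. three stacked layers over 10 fields of 16-dim embeddings:
    x = torch.randn(32, 10, 16)
    for layer in [InteractingLayer(16) for _ in range(3)]:
        x = layer(x)
    print(x.shape)  # torch.Size([32, 10, 16])

Stacking such layers over the [batch, num_fields, embedding_dim] tensor produced by the embedding layer, then flattening into a logit head, yields the high-order interaction stack the docstring describes.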
nextrec/models/ranking/masknet.py CHANGED
@@ -1,20 +1,142 @@
  """
  Date: create on 09/11/2025
- Author:
- Yang Zhou,zyaztec@gmail.com
+ Checkpoint: edit on 24/11/2025
+ Author: Yang Zhou, zyaztec@gmail.com
  Reference:
- [1] Pan Z, Sun F, Liu J, et al. MaskNet: Introducing feature-wise gating blocks for high-dimensional
- sparse recommendation data (CCF-Tencent CTR competition solution, 2020).
+ [1] Wang Z, She Q, Zhang J. MaskNet: Introducing Feature-Wise
+ Multiplication to CTR Ranking Models by Instance-Guided Mask.
+
+ MaskNet is a CTR prediction model that introduces instance-guided,
+ feature-wise multiplicative interactions into deep ranking networks.
+ Instead of relying solely on additive feature interactions from MLPs,
+ MaskNet generates a personalized “mask” vector for each instance based
+ on its embedding representation. This mask selectively scales hidden
+ features through element-wise multiplication, enabling the network to
+ emphasize informative dimensions and suppress irrelevant noise.
+
+ Each MaskBlock consists of:
+ (1) Instance-Guided Mask generation (two-layer MLP)
+ (2) Feature-wise multiplication with hidden representations
+ (3) Layer Normalization and nonlinear transformation
+
+ By stacking (SerialMaskNet) or parallelizing (ParallelMaskNet) multiple
+ MaskBlocks, MaskNet enhances expressive power while remaining efficient,
+ improving CTR performance without heavy feature engineering.
+
+ Key Advantages:
+ - Learns higher-order interactions via multiplicative gating
+ - Instance-adaptive feature importance modulation
+ - Better discrimination of informative vs. noisy dimensions
+ - Flexible architecture for both serial and parallel designs
+
+ MaskNet 是一种用于 CTR 预估的模型,它在深度排序网络中引入了
+ 基于实例(Instance-Guided)的特征级逐元素(Feature-wise)
+ 乘法交互机制。
+
+ 与传统仅依赖 MLP 的加性特征交互不同,MaskNet 会根据每个样本的
+ embedding 表示生成一个个性化的 “mask” 向量,通过逐元素的乘法
+ 选择性地放大有效特征维度、抑制无关或噪声特征。
+
+ 每个 MaskBlock 包含以下关键步骤:
+ (1) 基于当前样本 embedding 的双层 MLP Mask 生成
+ (2) Mask 与隐藏表示之间的逐元素乘法交互
+ (3) Layer Normalization 与非线性变换
+
+ 通过串联(SerialMaskNet)或并联(ParallelMaskNet)
+ 多个 MaskBlock,MaskNet 在保持高效的同时显著增强了特征表达能力,
+ 在无需大量特征工程的情况下提升 CTR 模型性能。
+
+ 核心优势:
+ - 通过乘法门控学习高阶特征交互关系
+ - 针对每个样本自适应调整特征重要性
+ - 有效区分信息特征与噪声特征
+ - 支持灵活的串行与并行网络结构设计
  """
 
  import torch
  import torch.nn as nn
+ import torch.nn.functional as F
 
  from nextrec.basic.model import BaseModel
- from nextrec.basic.layers import EmbeddingLayer, LR, MLP, PredictionLayer
+ from nextrec.basic.layers import EmbeddingLayer, MLP, PredictionLayer
  from nextrec.basic.features import DenseFeature, SparseFeature, SequenceFeature
 
 
+ class InstanceGuidedMask(nn.Module):
+     def __init__(self, input_dim: int, hidden_dim: int, output_dim: int):
+         super().__init__()
+         self.fc1 = nn.Linear(input_dim, hidden_dim)
+         self.fc2 = nn.Linear(hidden_dim, output_dim)
+
+     def forward(self, v_emb_flat: torch.Tensor) -> torch.Tensor:
+         # v_emb_flat: [batch, num_fields * embedding_dim]
+         x = self.fc1(v_emb_flat)
+         x = F.relu(x)
+         v_mask = self.fc2(x)
+         return v_mask
+
+ class MaskBlockOnEmbedding(nn.Module):
+     def __init__(
+         self,
+         num_fields: int,
+         embedding_dim: int,
+         mask_hidden_dim: int,
+         hidden_dim: int,
+     ):
+         super().__init__()
+         self.num_fields = num_fields
+         self.embedding_dim = embedding_dim
+         self.input_dim = num_fields * embedding_dim  # input_dim = num_fields * embedding_dim
+         self.ln_emb = nn.LayerNorm(embedding_dim)
+         self.mask_gen = InstanceGuidedMask(input_dim=self.input_dim, hidden_dim=mask_hidden_dim, output_dim=self.input_dim)
+         self.ffn = nn.Linear(self.input_dim, hidden_dim)
+         self.ln_hid = nn.LayerNorm(hidden_dim)
+
+     # different from MaskBlockOnHidden: input is field embeddings
+     def forward(self, field_emb: torch.Tensor, v_emb_flat: torch.Tensor) -> torch.Tensor:
+         B = field_emb.size(0)
+         norm_emb = self.ln_emb(field_emb)      # [B, num_fields, embedding_dim]
+         norm_emb_flat = norm_emb.view(B, -1)   # [B, num_fields * embedding_dim]
+         v_mask = self.mask_gen(v_emb_flat)     # [B, num_fields * embedding_dim]
+         v_masked_emb = v_mask * norm_emb_flat  # [B, num_fields * embedding_dim]
+         hidden = self.ffn(v_masked_emb)        # [B, hidden_dim]
+         hidden = self.ln_hid(hidden)
+         hidden = F.relu(hidden)
+
+         return hidden
+
+
+ class MaskBlockOnHidden(nn.Module):
+     def __init__(
+         self,
+         num_fields: int,
+         embedding_dim: int,
+         mask_hidden_dim: int,
+         hidden_dim: int,
+     ):
+         super().__init__()
+         self.num_fields = num_fields
+         self.embedding_dim = embedding_dim
+         self.hidden_dim = hidden_dim
+         self.v_emb_dim = num_fields * embedding_dim
+
+         self.ln_input = nn.LayerNorm(hidden_dim)
+         self.ln_output = nn.LayerNorm(hidden_dim)
+
+         self.mask_gen = InstanceGuidedMask(input_dim=self.v_emb_dim, hidden_dim=mask_hidden_dim, output_dim=hidden_dim)
+         self.ffn = nn.Linear(hidden_dim, hidden_dim)
+
+     # different from MaskBlockOnEmbedding: input is a hidden representation
+     def forward(self, hidden_in: torch.Tensor, v_emb_flat: torch.Tensor) -> torch.Tensor:
+         norm_hidden = self.ln_input(hidden_in)
+         v_mask = self.mask_gen(v_emb_flat)
+         v_masked_hid = v_mask * norm_hidden
+         out = self.ffn(v_masked_hid)
+         out = self.ln_output(out)
+         out = F.relu(out)
+         return out
+
+
  class MaskNet(BaseModel):
      @property
      def model_name(self):
@@ -23,28 +145,38 @@ class MaskNet(BaseModel):
      @property
      def task_type(self):
          return "binary"
-
-     def __init__(self,
-                  dense_features: list[DenseFeature] | list = [],
-                  sparse_features: list[SparseFeature] | list = [],
-                  sequence_features: list[SequenceFeature] | list = [],
-                  num_blocks: int = 3,
-                  mask_hidden_dim: int = 64,
-                  block_dropout: float = 0.1,
-                  mlp_params: dict = {},
-                  target: list[str] | list = [],
-                  optimizer: str = "adam",
-                  optimizer_params: dict = {},
-                  loss: str | nn.Module | None = "bce",
-                  loss_params: dict | list[dict] | None = None,
-                  device: str = 'cpu',
-                  embedding_l1_reg=1e-6,
-                  dense_l1_reg=1e-5,
-                  embedding_l2_reg=1e-5,
-                  dense_l2_reg=1e-4,
-                  **kwargs):
-
-         super(MaskNet, self).__init__(
+
+     def __init__(
+         self,
+         dense_features: list[DenseFeature] | None = None,
+         sparse_features: list[SparseFeature] | None = None,
+         sequence_features: list[SequenceFeature] | None = None,
+         model_type: str = "parallel",  # "serial" or "parallel"
+         num_blocks: int = 3,
+         mask_hidden_dim: int = 64,
+         block_hidden_dim: int = 256,
+         block_dropout: float = 0.0,
+         mlp_params: dict | None = None,
+         target: list[str] | None = None,
+         optimizer: str = "adam",
+         optimizer_params: dict | None = None,
+         loss: str | nn.Module | None = "bce",
+         loss_params: dict | list[dict] | None = None,
+         device: str = "cpu",
+         embedding_l1_reg: float = 1e-6,
+         dense_l1_reg: float = 1e-5,
+         embedding_l2_reg: float = 1e-5,
+         dense_l2_reg: float = 1e-4,
+         **kwargs,
+     ):
+         dense_features = dense_features or []
+         sparse_features = sparse_features or []
+         sequence_features = sequence_features or []
+         target = target or []
+         mlp_params = mlp_params or {}
+         optimizer_params = optimizer_params or {}
+
+         super().__init__(
              dense_features=dense_features,
              sparse_features=sparse_features,
              sequence_features=sequence_features,
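
One motivation visible in this hunk: the old signature used mutable defaults (`= []`, `= {}`), which Python evaluates once at function definition, so the same list or dict object is shared by every call. A minimal illustration of the pitfall that the new `None` defaults plus the `features = features or []` pattern avoid (toy functions for illustration, not nextrec code):

    def bad(features=[]):      # one shared list, created at definition time
        features.append("clicked")
        return features

    def good(features=None):   # fresh list per call, as in the new __init__
        features = features or []
        features.append("clicked")
        return features

    print(bad(), bad())    # ['clicked'] ['clicked', 'clicked']  <- state leaks
    print(good(), good())  # ['clicked'] ['clicked']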
@@ -56,45 +188,99 @@ class MaskNet(BaseModel):
              embedding_l2_reg=embedding_l2_reg,
              dense_l2_reg=dense_l2_reg,
              early_stop_patience=20,
-             **kwargs
+             **kwargs,
          )
 
+         if loss is None:
+             loss = "bce"
          self.loss = loss
-         if self.loss is None:
-             self.loss = "bce"
-
-         self.mask_features = sparse_features + sequence_features
-         if len(self.mask_features) == 0:
-             raise ValueError("MaskNet requires at least one sparse/sequence feature.")
+
+         self.dense_features = dense_features
+         self.sparse_features = sparse_features
+         self.sequence_features = sequence_features
+         self.mask_features = self.sparse_features + self.sequence_features + self.dense_features
+         assert len(self.mask_features) > 0, "MaskNet requires at least one feature for masking."
 
          self.embedding = EmbeddingLayer(features=self.mask_features)
+
          self.num_fields = len(self.mask_features)
-         self.embedding_dim = self.mask_features[0].embedding_dim
-         if any(f.embedding_dim != self.embedding_dim for f in self.mask_features):
-             raise ValueError("MaskNet expects identical embedding_dim across mask_features.")
 
-         self.num_blocks = max(1, num_blocks)
-         self.field_dim = self.num_fields * self.embedding_dim
-
-         self.linear = LR(self.field_dim)
-         self.mask_generators = nn.ModuleList()
-         for _ in range(self.num_blocks):
-             self.mask_generators.append(
-                 nn.Sequential(
-                     nn.Linear(self.field_dim, mask_hidden_dim),
-                     nn.ReLU(),
-                     nn.Linear(mask_hidden_dim, self.num_fields)
+         self.embedding_dim = getattr(self.mask_features[0], "embedding_dim", None)
+         assert self.embedding_dim is not None, "MaskNet requires mask_features to have 'embedding_dim' defined."
+
+         for f in self.mask_features:
+             edim = getattr(f, "embedding_dim", None)
+             if edim is None or edim != self.embedding_dim:
+                 raise ValueError(
+                     f"MaskNet expects identical embedding_dim across all mask_features, "
+                     f"but got {edim} for feature {getattr(f, 'name', type(f))}."
                  )
+
+         self.v_emb_dim = self.num_fields * self.embedding_dim
+
+         self.model_type = model_type.lower()
+         assert self.model_type in ("serial", "parallel"), "model_type must be either 'serial' or 'parallel'."
+
+         self.num_blocks = max(1, num_blocks)
+         self.block_hidden_dim = block_hidden_dim
+         self.block_dropout = nn.Dropout(block_dropout) if block_dropout > 0 else nn.Identity()
+
+         if self.model_type == "serial":
+             self.first_block = MaskBlockOnEmbedding(
+                 num_fields=self.num_fields,
+                 embedding_dim=self.embedding_dim,
+                 mask_hidden_dim=mask_hidden_dim,
+                 hidden_dim=block_hidden_dim,
+             )
+
+             self.hidden_blocks = nn.ModuleList(
+                 [
+                     MaskBlockOnHidden(
+                         num_fields=self.num_fields,
+                         embedding_dim=self.embedding_dim,
+                         mask_hidden_dim=mask_hidden_dim,
+                         hidden_dim=block_hidden_dim,
+                     )
+                     for _ in range(self.num_blocks - 1)
+                 ]
+             )
+
+             self.mask_blocks = nn.ModuleList([self.first_block, *self.hidden_blocks])
+             self.output_layer = nn.Linear(block_hidden_dim, 1)
+             self.final_mlp = None
+
+         else:  # parallel
+             self.mask_blocks = nn.ModuleList(
+                 [
+                     MaskBlockOnEmbedding(
+                         num_fields=self.num_fields,
+                         embedding_dim=self.embedding_dim,
+                         mask_hidden_dim=mask_hidden_dim,
+                         hidden_dim=block_hidden_dim,
+                     )
+                     for _ in range(self.num_blocks)
+                 ]
              )
 
-         self.block_dropout = nn.Dropout(block_dropout)
-         self.final_mlp = MLP(input_dim=self.field_dim * self.num_blocks, **mlp_params)
+             self.final_mlp = MLP(
+                 input_dim=self.num_blocks * block_hidden_dim,
+                 **mlp_params,
+             )
+             self.output_layer = None
+
          self.prediction_layer = PredictionLayer(task_type=self.task_type)
 
-         self._register_regularization_weights(
-             embedding_attr='embedding',
-             include_modules=['linear', 'mask_generators', 'final_mlp']
-         )
+         if self.model_type == "serial":
+             self._register_regularization_weights(
+                 embedding_attr="embedding",
+                 include_modules=["mask_blocks", "output_layer"],
+             )
+         else:  # parallel
+             self._register_regularization_weights(
+                 embedding_attr="embedding",
+                 include_modules=["mask_blocks", "final_mlp"],
+             )
 
          self.compile(
              optimizer=optimizer,
@@ -103,25 +289,31 @@ class MaskNet(BaseModel):
              loss_params=loss_params,
          )
 
-     def forward(self, x):
+     def forward(self, x: dict[str, torch.Tensor]) -> torch.Tensor:
          field_emb = self.embedding(x=x, features=self.mask_features, squeeze_dim=False)
-         flat_input = field_emb.flatten(start_dim=1)
-         y_linear = self.linear(flat_input)
-
-         block_input = field_emb
-         mask_input = flat_input
-         block_outputs = []
-         for mask_gen in self.mask_generators:
-             mask_logits = mask_gen(mask_input)
-             mask = torch.sigmoid(mask_logits).unsqueeze(-1)
-             masked_emb = block_input * mask
-             block_output = self.block_dropout(masked_emb.flatten(start_dim=1))
-             block_outputs.append(block_output)
-             mask_input = block_output
-             block_input = masked_emb.view_as(field_emb)
-
-         stacked = torch.cat(block_outputs, dim=1)
-         y_deep = self.final_mlp(stacked)
-
-         y = y_linear + y_deep
-         return self.prediction_layer(y)
+         B = field_emb.size(0)
+         v_emb_flat = field_emb.view(B, -1)  # flattened embeddings
+
+         if self.model_type == "parallel":
+             block_outputs = []
+             for block in self.mask_blocks:
+                 h = block(field_emb, v_emb_flat)  # [B, block_hidden_dim]
+                 h = self.block_dropout(h)
+                 block_outputs.append(h)
+
+             concat_hidden = torch.cat(block_outputs, dim=-1)
+             logit = self.final_mlp(concat_hidden)  # [B, 1]
+
+         else:  # serial
+             hidden = self.first_block(field_emb, v_emb_flat)
+             hidden = self.block_dropout(hidden)
+
+             for block in self.hidden_blocks:
+                 hidden = block(hidden, v_emb_flat)
+                 hidden = self.block_dropout(hidden)
+
+             logit = self.output_layer(hidden)  # [B, 1]
+
+         y = self.prediction_layer(logit)
+         return y
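
Read together, the rewritten forward pass flattens the field embeddings once and feeds that same v_emb_flat to every mask generator, so each block's gate is conditioned on the whole instance. Below is a shape-level smoke test of the two topologies; it is a sketch that assumes the block classes from this diff are importable from nextrec.models.ranking.masknet:

    import torch
    from nextrec.models.ranking.masknet import MaskBlockOnEmbedding, MaskBlockOnHidden

    B, num_fields, emb_dim, hid = 32, 10, 16, 256
    field_emb = torch.randn(B, num_fields, emb_dim)
    v_emb_flat = field_emb.view(B, -1)

    # Parallel: every block reads the raw embeddings; outputs are concatenated.
    blocks = [MaskBlockOnEmbedding(num_fields, emb_dim, 64, hid) for _ in range(3)]
    parallel_out = torch.cat([blk(field_emb, v_emb_flat) for blk in blocks], dim=-1)
    assert parallel_out.shape == (B, 3 * hid)

    # Serial: the first block lifts embeddings to a hidden state, later blocks
    # refine it, but every mask is still generated from the same v_emb_flat.
    first = MaskBlockOnEmbedding(num_fields, emb_dim, 64, hid)
    rest = [MaskBlockOnHidden(num_fields, emb_dim, 64, hid) for _ in range(2)]
    hidden = first(field_emb, v_emb_flat)
    for blk in rest:
        hidden = blk(hidden, v_emb_flat)
    assert hidden.shape == (B, hid)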
{nextrec-0.2.3.dist-info → nextrec-0.2.4.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: nextrec
- Version: 0.2.3
+ Version: 0.2.4
  Summary: A comprehensive recommendation library with match, ranking, and multi-task learning models
  Project-URL: Homepage, https://github.com/zerolovesea/NextRec
  Project-URL: Repository, https://github.com/zerolovesea/NextRec
@@ -61,7 +61,7 @@ Description-Content-Type: text/markdown
  ![Python](https://img.shields.io/badge/Python-3.10+-blue.svg)
  ![PyTorch](https://img.shields.io/badge/PyTorch-1.10+-ee4c2c.svg)
  ![License](https://img.shields.io/badge/License-Apache%202.0-green.svg)
- ![Version](https://img.shields.io/badge/Version-0.2.3-orange.svg)
+ ![Version](https://img.shields.io/badge/Version-0.2.4-orange.svg)
 
  English | [中文版](README_zh.md)
 
{nextrec-0.2.3.dist-info → nextrec-0.2.4.dist-info}/RECORD RENAMED
@@ -1,5 +1,5 @@
  nextrec/__init__.py,sha256=CvocnY2uBp0cjNkhrT6ogw0q2bN9s1GNp754FLO-7lo,1117
- nextrec/__version__.py,sha256=PNiDER4qM19h9zdsdfgKt2_dT4WgYK7EguJ8RU2qA_g,22
+ nextrec/__version__.py,sha256=SBl2EPFW-ltPvQ7vbVWItyAsz3aKYIpjO7vcfr84GkU,22
  nextrec/basic/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  nextrec/basic/activation.py,sha256=9EfYmwE0brTSKwx_0FIGQ_rybFBT9n_G-UWA7NAhMsI,2804
  nextrec/basic/callback.py,sha256=qkq3k8rP0g4BW2C3FSCdVt_CyCcJwJ-rUXjhT2p4LP8,1035
@@ -32,14 +32,14 @@ nextrec/models/multi_task/ple.py,sha256=otP6oLgzrJhwkLFItzNE-AtIPouObDkafRvWzTCx
  nextrec/models/multi_task/share_bottom.py,sha256=LL5HBVlvvBzHV2fLBRQMGIwpqmlxILTgU4c51XyTCo4,4517
  nextrec/models/ranking/__init__.py,sha256=-qe34zQEVwmxeTPGYCa6gbql9quT8DwB7-buHfA7Iig,428
  nextrec/models/ranking/afm.py,sha256=r9m1nEnc0m5d4pMtOxRMqOaXaBNCEkjJBFB-5wSHeFA,4540
- nextrec/models/ranking/autoint.py,sha256=GYzRynjn6Csq4b3qYIFWxLQ4Yl57_OQBeF2IY0Zhr9Q,5654
+ nextrec/models/ranking/autoint.py,sha256=TyA45mnXP7pZHhY6AbyK84qLpniGFXst7v7E_RlYiZM,7754
  nextrec/models/ranking/dcn.py,sha256=dUV5GbHypBGc9vVozk6aGYfIXq23c0deX-HFnIhZueg,4208
  nextrec/models/ranking/deepfm.py,sha256=y28yJxF__TZR3O1G2ufKZVtBRLgCgmlXWqvPgLzwm3U,3510
  nextrec/models/ranking/dien.py,sha256=E6s9TDwQfGSwtzzh8hG2F5gwgVxzVZPcptYvHLNzOLA,8475
  nextrec/models/ranking/din.py,sha256=j5tkT5k91CbsMlMr5vJOySrcY2_rFGxmEgJJ0McW7-Q,7196
  nextrec/models/ranking/fibinet.py,sha256=X6CbQbritvq5jql_Tvs4bn_tRla2zpWPplftZv8k6f0,4853
  nextrec/models/ranking/fm.py,sha256=3Qx_Fgowegr6UPQtEeTmHtOrbWzkvqH94ZTjOqRLu-E,2961
- nextrec/models/ranking/masknet.py,sha256=Bu0mZl2vKqcGnqCuUjPHjPRd1f-cDTeVwFj8Y_6v3C8,4639
+ nextrec/models/ranking/masknet.py,sha256=hU3m270vd9DWH2_Hh1hYiCGaF9fKC3eIsWQLSA-Gdf8,12215
  nextrec/models/ranking/pnn.py,sha256=5RxIKdxD0XcGq-b_QDdwGRwk6b_5BQjyMvCw3Ibv2Kk,4957
  nextrec/models/ranking/widedeep.py,sha256=b6ctElaZPv5WSYDA4piYUBo3je0eJpWpWECwcuWavM4,3716
  nextrec/models/ranking/xdeepfm.py,sha256=I00J5tfE4tPluqeW-qrNtE4V_9fC7-rgFvA0Fxqka7o,4274
@@ -47,7 +47,7 @@ nextrec/utils/__init__.py,sha256=6x3OZbqks2gtgJd00y_-Y8QiAT42x5t14ARHQ-ULQDo,350
  nextrec/utils/embedding.py,sha256=yxYSdFx0cJITh3Gf-K4SdhwRtKGcI0jOsyBgZ0NLa_c,465
  nextrec/utils/initializer.py,sha256=ffYOs5QuIns_d_-5e40iNtg6s1ftgREJN-ueq_NbDQE,1647
  nextrec/utils/optimizer.py,sha256=85ifoy2IQgjPHOqLqr1ho7XBGE_0ry1yEB9efS6C2lM,2446
- nextrec-0.2.3.dist-info/METADATA,sha256=SpkwkLdg4MQLeysCCr_0fJXak-Vo139Cz3HFLXR-4Z0,11425
- nextrec-0.2.3.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- nextrec-0.2.3.dist-info/licenses/LICENSE,sha256=2fQfVKeafywkni7MYHyClC6RGGC3laLTXCNBx-ubtp0,1064
- nextrec-0.2.3.dist-info/RECORD,,
+ nextrec-0.2.4.dist-info/METADATA,sha256=VurhzAYPQ_PbBi6WJFHvgbbk08OVd_1udwLPHxqApag,11425
+ nextrec-0.2.4.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ nextrec-0.2.4.dist-info/licenses/LICENSE,sha256=2fQfVKeafywkni7MYHyClC6RGGC3laLTXCNBx-ubtp0,1064
+ nextrec-0.2.4.dist-info/RECORD,,