nextrec-0.4.34-py3-none-any.whl → nextrec-0.5.1-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
@@ -156,7 +156,7 @@ class AFM(BaseModel):
         # First-order dense part
         if self.linear_dense is not None:
             dense_inputs = [
-                x[f.name].float().view(batch_size, -1) for f in self.dense_features
+                x[f.name].float().reshape(batch_size, -1) for f in self.dense_features
             ]
             dense_stack = torch.cat(dense_inputs, dim=1) if dense_inputs else None
             if dense_stack is not None:
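Context for the `view` → `reshape` swap: `Tensor.view` requires a memory layout it can reinterpret in place and raises a `RuntimeError` otherwise, while `reshape` falls back to a copy when needed. A minimal standalone sketch (plain PyTorch, not NextRec code):

    import torch

    x = torch.randn(8, 4).t()   # transposing yields a non-contiguous tensor
    y = x.reshape(8, 4)         # fine: reshape copies when it cannot alias
    try:
        x.view(8, 4)            # fails: view needs a compatible memory layout
    except RuntimeError as err:
        print(err)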
@@ -170,7 +170,7 @@ class AFM(BaseModel):
             term = emb(x[feature.name].long())  # [B, 1]
         else:  # SequenceFeature
             seq_input = x[feature.name].long()  # [B, 1]
-            if feature.max_len is not None and seq_input.size(1) > feature.max_len:
+            if feature.max_len is not None:
                 seq_input = seq_input[:, -feature.max_len :]
             mask = self.input_mask(x, feature, seq_input).squeeze(1)  # [B, 1]
             seq_weight = emb(seq_input).squeeze(-1)  # [B, L]
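The dropped `seq_input.size(1) > feature.max_len` check is redundant: a Python slice never over-reads, so `seq[:, -max_len:]` returns the tensor unchanged whenever the sequence is already short enough. Removing the data-dependent branch also keeps the traced graph shape-independent, which matters for the ONNX export introduced in this release; the same simplification recurs in the FFM and HSTU hunks below. A quick check of the invariant (plain PyTorch, illustrative sizes):

    import torch

    max_len = 5
    short = torch.arange(6).reshape(2, 3)              # length 3 < max_len
    assert torch.equal(short[:, -max_len:], short)     # slicing is a no-op

    longer = torch.arange(20).reshape(2, 10)           # length 10 > max_len
    assert longer[:, -max_len:].shape == (2, max_len)  # keeps the last max_len steps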
@@ -186,16 +186,11 @@ class AFM(BaseModel):
         for feature in self.fm_features:
             value = x.get(f"{feature.name}_value")
             if value is not None:
-                value = value.float()
-                if value.dim() == 1:
-                    value = value.unsqueeze(-1)
+                value = value.float().reshape(batch_size, -1)
             else:
                 if isinstance(feature, SequenceFeature):
                     seq_input = x[feature.name].long()
-                    if (
-                        feature.max_len is not None
-                        and seq_input.size(1) > feature.max_len
-                    ):
+                    if feature.max_len is not None:
                         seq_input = seq_input[:, -feature.max_len :]
                     value = self.input_mask(x, feature, seq_input).sum(dim=2)  # [B, 1]
                 else:
@@ -390,13 +390,13 @@ class DIEN(BaseModel):
                 dim=-1,
             )
             score_t = self.attention_layer.attention_net(concat_feat)  # [B, 1]
-            att_scores_list.append(score_t)
+            att_scores_list.append(score_t.unsqueeze(1))

        # [B, seq_len, 1]
        att_scores = torch.cat(att_scores_list, dim=1)

-        scores_flat = att_scores.squeeze(-1)  # [B, seq_len]
-        mask_flat = mask.squeeze(-1)  # [B, seq_len]
+        scores_flat = att_scores[..., 0]  # [B, seq_len]
+        mask_flat = mask[..., 0]  # [B, seq_len]

        scores_flat = scores_flat.masked_fill(mask_flat == 0, -1e9)
        att_weights = torch.softmax(scores_flat, dim=1)  # [B, seq_len]
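This hunk fixes a real shape bug: each `score_t` is `[B, 1]`, so the old `torch.cat(..., dim=1)` produced `[B, seq_len]` rather than the documented `[B, seq_len, 1]`, and the subsequent `squeeze(-1)` would collapse the sequence axis whenever `seq_len == 1`. Appending `score_t.unsqueeze(1)` makes the comment true, and `[..., 0]` always removes exactly one known axis, unlike `squeeze`, whose effect depends on the runtime shape. A sketch of the length-1 failure mode, with made-up sizes:

    import torch

    scores = [torch.randn(4, 1)]                 # seq_len == 1, batch == 4
    old = torch.cat(scores, dim=1).squeeze(-1)   # shape [4]: sequence axis lost
    new = torch.cat([s.unsqueeze(1) for s in scores], dim=1)[..., 0]
    print(old.shape, new.shape)                  # torch.Size([4]) torch.Size([4, 1])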
@@ -437,8 +437,7 @@ class DIEN(BaseModel):

         for feat in self.dense_features:
             val = x[feat.name].float()
-            if val.dim() == 1:
-                val = val.unsqueeze(1)
+            val = val.view(val.size(0), -1)
             other_embeddings.append(val)

         concat_input = torch.cat(other_embeddings, dim=-1)  # [B, total_dim]
@@ -460,15 +459,15 @@ class DIEN(BaseModel):
         interest_states = interest_states[:, :-1, :]
         pos_seq = behavior_emb[:, 1:, :]
         neg_seq = neg_behavior_emb[:, 1:, :]
-        aux_mask = mask[:, 1:, :].squeeze(-1)
+        aux_mask = mask[:, 1:, 0]

         if aux_mask.sum() == 0:
             return torch.tensor(0.0, device=self.device)

         pos_input = torch.cat([interest_states, pos_seq], dim=-1)
         neg_input = torch.cat([interest_states, neg_seq], dim=-1)
-        pos_logits = self.auxiliary_net(pos_input).squeeze(-1)
-        neg_logits = self.auxiliary_net(neg_input).squeeze(-1)
+        pos_logits = self.auxiliary_net(pos_input)[..., 0]
+        neg_logits = self.auxiliary_net(neg_input)[..., 0]

         pos_loss = F.binary_cross_entropy_with_logits(
             pos_logits, torch.ones_like(pos_logits), reduction="none"
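For orientation, a minimal sketch of the masked auxiliary-loss pattern these lines feed into, with invented sizes (B=4, T-1=9). Only the `pos_loss` call is visible in the diff; the symmetric negative term and the masked averaging are assumptions about the surrounding code:

    import torch
    import torch.nn.functional as F

    pos_logits = torch.randn(4, 9)               # auxiliary_net(pos_input)[..., 0]
    neg_logits = torch.randn(4, 9)
    aux_mask = (torch.rand(4, 9) > 0.3).float()  # mask[:, 1:, 0]

    pos_loss = F.binary_cross_entropy_with_logits(
        pos_logits, torch.ones_like(pos_logits), reduction="none"
    )
    neg_loss = F.binary_cross_entropy_with_logits(
        neg_logits, torch.zeros_like(neg_logits), reduction="none"
    )
    # average only over valid (unpadded) steps
    aux_loss = ((pos_loss + neg_loss) * aux_mask).sum() / aux_mask.sum()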
@@ -190,7 +190,7 @@ class FFM(BaseModel):
             return emb(x[feature.name].long())

         seq_input = x[feature.name].long()
-        if feature.max_len is not None and seq_input.size(1) > feature.max_len:
+        if feature.max_len is not None:
             seq_input = seq_input[:, -feature.max_len :]
         seq_emb = emb(seq_input)  # [B, L, D]
         mask = self.input_mask(x, feature, seq_input)
@@ -224,7 +224,7 @@ class FFM(BaseModel):
             term = emb(x[feature.name].long())  # [B, 1]
         else:
             seq_input = x[feature.name].long()
-            if feature.max_len is not None and seq_input.size(1) > feature.max_len:
+            if feature.max_len is not None:
                 seq_input = seq_input[:, -feature.max_len :]
             mask = self.input_mask(x, feature, seq_input).squeeze(1)  # [B, L]
             seq_weight = emb(seq_input).squeeze(-1)  # [B, L]
@@ -223,8 +223,7 @@ class SDM(BaseMatchModel):
         for feat in self.user_dense_features:
             if feat.name in user_input:
                 val = user_input[feat.name].float()
-                if val.dim() == 1:
-                    val = val.unsqueeze(1)
+                val = val.reshape(val.size(0), -1)
                 dense_features.append(val)
         if dense_features:
             features_list.append(torch.cat(dense_features, dim=1))
@@ -438,8 +438,6 @@ class HSTU(BaseModel):
         return self.causal_mask[:seq_len, :seq_len]

     def trim_sequence(self, seq: torch.Tensor) -> torch.Tensor:
-        if seq.size(1) <= self.max_seq_len:
-            return seq
         return seq[:, -self.max_seq_len :]

     def forward(self, x: dict[str, torch.Tensor]) -> torch.Tensor:
@@ -0,0 +1,252 @@ nextrec/utils/onnx_utils.py (new file, per the RECORD diff below)
+"""
+ONNX utilities for NextRec.
+
+Date: create on 25/01/2026
+Author: Yang Zhou, zyaztec@gmail.com
+"""
+
+from __future__ import annotations
+
+from pathlib import Path
+from typing import Iterable, Sequence
+
+import torch
+import numpy as np
+import onnxruntime as ort
+
+from nextrec.basic.features import DenseFeature, SequenceFeature, SparseFeature
+from nextrec.utils.torch_utils import to_numpy
+
+
+class OnnxModelWrapper(torch.nn.Module):
+    """Wrap a NextRec model to accept positional ONNX inputs."""
+
+    def __init__(self, model: torch.nn.Module, feature_names: Sequence[str]):
+        super().__init__()
+        self.model = model
+        self.feature_names = list(feature_names)
+
+    def forward(self, *inputs: torch.Tensor):
+        if len(inputs) != len(self.feature_names):
+            raise ValueError(
+                "[OnnxWrapper Error] Number of inputs does not match feature names."
+            )
+        x = {name: tensor for name, tensor in zip(self.feature_names, inputs)}
+        output = self.model(x)
+        if isinstance(output, list):
+            return tuple(output)
+        return output
+
+
+def create_dummy_inputs(
+    features: Iterable[object],
+    batch_size: int,
+    device: torch.device,
+    default_seq_len: int = 10,
+    seq_len_map: dict[str, int] | None = None,
+) -> list[torch.Tensor]:
+    tensors: list[torch.Tensor] = []
+    for feature in features:
+        if isinstance(feature, DenseFeature):
+            input_dim = max(int(feature.input_dim), 1)
+            tensors.append(
+                torch.zeros((batch_size, input_dim), dtype=torch.float32, device=device)
+            )
+        elif isinstance(feature, SequenceFeature):
+            seq_len = None
+            if seq_len_map:
+                seq_len = seq_len_map.get(feature.name)
+            if seq_len is None:
+                seq_len = (
+                    feature.max_len if feature.max_len is not None else default_seq_len
+                )
+            seq_len = max(int(seq_len), 1)
+            tensors.append(
+                torch.zeros((batch_size, seq_len), dtype=torch.long, device=device)
+            )
+        else:
+            tensors.append(torch.zeros((batch_size,), dtype=torch.long, device=device))
+    return tensors
+
+
+def normalize_dense(feature: DenseFeature, array: object) -> np.ndarray:
+    arr = np.asarray(array, dtype=np.float32)
+    if arr.ndim == 1:
+        arr = arr.reshape(-1, 1)
+    else:
+        arr = arr.reshape(arr.shape[0], -1)
+    expected = max(int(feature.input_dim), 1)
+    if arr.shape[1] != expected:
+        raise ValueError(
+            f"[ONNX Input Error] Dense feature '{feature.name}' expects {expected} dims but got {arr.shape[1]}."
+        )
+    return arr
+
+
+def normalize_sparse(feature: SparseFeature, array: object) -> np.ndarray:
+    arr = np.asarray(array, dtype=np.int64)
+    if arr.ndim == 2 and arr.shape[1] == 1:
+        arr = arr.reshape(-1)
+    elif arr.ndim != 1:
+        arr = arr.reshape(arr.shape[0], -1)
+        if arr.shape[1] != 1:
+            raise ValueError(
+                f"[ONNX Input Error] Sparse feature '{feature.name}' expects 1 dim but got {arr.shape}."
+            )
+        arr = arr.reshape(-1)
+    return arr
+
+
+def normalize_sequence(feature: SequenceFeature, array: object) -> np.ndarray:
+    arr = np.asarray(array, dtype=np.int64)
+    if arr.ndim == 1:
+        arr = arr.reshape(1, -1)
+    elif arr.ndim > 2:
+        arr = arr.reshape(arr.shape[0], -1)
+    max_len = feature.max_len if feature.max_len is not None else arr.shape[1]
+    max_len = max(int(max_len), 1)
+    if arr.shape[1] > max_len:
+        arr = arr[:, :max_len]
+    elif arr.shape[1] < max_len:
+        pad_value = feature.padding_idx if feature.padding_idx is not None else 0
+        pad_width = max_len - arr.shape[1]
+        arr = np.pad(arr, ((0, 0), (0, pad_width)), constant_values=pad_value)
+    return arr
+
+
+def build_onnx_input_feed(
+    features: Iterable[object],
+    feature_batch: dict[str, object],
+    input_names: Sequence[str] | None = None,
+) -> dict[str, np.ndarray]:
+    feed: dict[str, np.ndarray] = {}
+    for feature in features:
+        if input_names is not None and feature.name not in input_names:
+            continue
+        if feature.name not in feature_batch:
+            raise KeyError(
+                f"[ONNX Input Error] Feature '{feature.name}' missing from batch data."
+            )
+        value = to_numpy(feature_batch[feature.name])
+        if isinstance(feature, DenseFeature):
+            value = normalize_dense(feature, value)
+        elif isinstance(feature, SequenceFeature):
+            value = normalize_sequence(feature, value)
+        else:
+            value = normalize_sparse(feature, value)
+        feed[feature.name] = value
+    return feed
+
+
+def pad_tensor(
+    value: torch.Tensor, pad_rows: int, pad_value: int | float
+) -> torch.Tensor:
+    if pad_rows <= 0:
+        return value
+    pad_shape = (pad_rows, *value.shape[1:])
+    pad = torch.full(pad_shape, pad_value, dtype=value.dtype, device=value.device)
+    return torch.cat([value, pad], dim=0)
+
+
+def pad_array(value: np.ndarray, pad_rows: int, pad_value: int | float) -> np.ndarray:
+    if pad_rows <= 0:
+        return value
+    pad_shape = (pad_rows, *value.shape[1:])
+    pad = np.full(pad_shape, pad_value, dtype=value.dtype)
+    return np.concatenate([value, pad], axis=0)
+
+
+def pad_onnx_inputs(
+    features: Iterable[object],
+    feature_batch: dict[str, object],
+    target_batch: int,
+) -> tuple[dict[str, object], int]:
+    if target_batch <= 0:
+        return feature_batch, 0
+    padded: dict[str, object] = {}
+    orig_batch = None
+    for feature in features:
+        if feature.name not in feature_batch:
+            continue
+        value = feature_batch[feature.name]
+        if isinstance(value, torch.Tensor):
+            batch = value.shape[0] if value.dim() > 0 else 1
+        else:
+            arr = np.asarray(value)
+            batch = arr.shape[0] if arr.ndim > 0 else 1
+        if orig_batch is None:
+            orig_batch = int(batch)
+        pad_rows = max(int(target_batch) - int(batch), 0)
+        if isinstance(feature, DenseFeature):
+            pad_value: int | float = 0.0
+        elif isinstance(feature, SequenceFeature):
+            pad_value = feature.padding_idx if feature.padding_idx is not None else 0
+        else:
+            pad_value = 0
+        if isinstance(value, torch.Tensor):
+            padded[feature.name] = pad_tensor(value, pad_rows, pad_value)
+        else:
+            padded[feature.name] = pad_array(np.asarray(value), pad_rows, pad_value)
+    if orig_batch is None:
+        orig_batch = 0
+    return padded, orig_batch
+
+
+def pad_id_batch(
+    id_batch: dict[str, object],
+    target_batch: int,
+    pad_value: int | float = 0,
+) -> tuple[dict[str, object], int]:
+    if target_batch <= 0:
+        return id_batch, 0
+    padded: dict[str, object] = {}
+    orig_batch = None
+    for name, value in id_batch.items():
+        if isinstance(value, torch.Tensor):
+            batch = value.shape[0] if value.dim() > 0 else 1
+        else:
+            arr = np.asarray(value)
+            batch = arr.shape[0] if arr.ndim > 0 else 1
+        if orig_batch is None:
+            orig_batch = int(batch)
+        pad_rows = max(int(target_batch) - int(batch), 0)
+        if isinstance(value, torch.Tensor):
+            padded[name] = pad_tensor(value, pad_rows, pad_value)
+        else:
+            padded[name] = pad_array(np.asarray(value), pad_rows, pad_value)
+    if orig_batch is None:
+        orig_batch = 0
+    return padded, orig_batch
+
+
+def merge_onnx_outputs(outputs: Sequence[np.ndarray]) -> np.ndarray:
+    if not outputs:
+        raise ValueError("[ONNX Output Error] Empty ONNX output list.")
+    if len(outputs) == 1:
+        return outputs[0]
+    normalized: list[np.ndarray] = []
+    batch = outputs[0].shape[0] if outputs[0].ndim > 0 else None
+    for out in outputs:
+        arr = np.asarray(out)
+        if arr.ndim == 0:
+            arr = arr.reshape(1, 1)
+        elif arr.ndim == 1:
+            arr = arr.reshape(-1, 1)
+        if batch is not None and arr.shape[0] != batch:
+            raise ValueError(
+                "[ONNX Output Error] Output batch size mismatch across ONNX outputs."
+            )
+        normalized.append(arr)
+    return np.concatenate(normalized, axis=1)
+
+
+def load_onnx_session(
+    onnx_path: str | Path,
+):
+    available = ort.get_available_providers()
+    preferred = ["CUDAExecutionProvider", "CPUExecutionProvider"]
+    selected = [p for p in preferred if p in available]
+    if not selected:
+        selected = available
+
+    return ort.InferenceSession(str(onnx_path), providers=selected)
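A hedged end-to-end sketch of how these helpers might compose for export and inference. `model`, `features`, and `batch` stand in for a trained NextRec model, its feature definitions, and a dict of input arrays; the exact attributes NextRec models expose are not part of this diff, so treat the wiring as illustrative:

    import torch
    from nextrec.utils.onnx_utils import (
        OnnxModelWrapper,
        build_onnx_input_feed,
        create_dummy_inputs,
        load_onnx_session,
    )

    feature_names = [f.name for f in features]       # `features`: assumed feature list
    wrapper = OnnxModelWrapper(model.eval(), feature_names)
    dummy = create_dummy_inputs(features, batch_size=2, device=torch.device("cpu"))

    # export with one positional input per feature, batch axis dynamic
    torch.onnx.export(
        wrapper,
        tuple(dummy),
        "model.onnx",
        input_names=feature_names,
        dynamic_axes={name: {0: "batch"} for name in feature_names},
    )

    session = load_onnx_session("model.onnx")
    feed = build_onnx_input_feed(features, batch)    # `batch`: dict of arrays/tensors
    outputs = session.run(None, feed)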
@@ -61,7 +61,12 @@ def to_tensor(
 ) -> torch.Tensor:
     if value is None:
         raise ValueError("[Tensor Utils Error] Cannot convert None to tensor.")
-    tensor = value if isinstance(value, torch.Tensor) else torch.as_tensor(value)
+    if isinstance(value, torch.Tensor):
+        tensor = value
+    else:
+        if isinstance(value, np.ndarray):
+            value = value.copy()
+        tensor = torch.as_tensor(value)
     if tensor.dtype != dtype:
         tensor = tensor.to(dtype=dtype)

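The extra `value.copy()` exists because `torch.as_tensor` shares memory with a NumPy array whenever it can, so the returned tensor would otherwise alias the caller's buffer (and non-writable arrays trigger a PyTorch warning on conversion). A quick illustration of the aliasing:

    import numpy as np
    import torch

    a = np.zeros(3, dtype=np.float32)
    shared = torch.as_tensor(a)           # aliases a's memory
    a[0] = 7.0
    print(shared[0])                      # tensor(7.): the mutation leaks through

    detached = torch.as_tensor(a.copy())  # copy first, as the new code does
    a[1] = 9.0
    print(detached[1])                    # tensor(0.): unaffected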
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: nextrec
-Version: 0.4.34
+Version: 0.5.1
 Summary: A comprehensive recommendation library with match, ranking, and multi-task learning models
 Project-URL: Homepage, https://github.com/zerolovesea/NextRec
 Project-URL: Repository, https://github.com/zerolovesea/NextRec
@@ -24,10 +24,14 @@ Requires-Dist: numpy<2.0,>=1.21; sys_platform == 'linux' and python_version < '3
 Requires-Dist: numpy<3.0,>=1.26; sys_platform == 'linux' and python_version >= '3.12'
 Requires-Dist: numpy>=1.23.0; sys_platform == 'win32'
 Requires-Dist: numpy>=1.24.0; sys_platform == 'darwin'
+Requires-Dist: onnx>=1.16.0
+Requires-Dist: onnxruntime>=1.18.0
+Requires-Dist: onnxscript>=0.1.1
 Requires-Dist: pandas<2.0,>=1.5; sys_platform == 'linux' and python_version < '3.12'
 Requires-Dist: pandas<2.3.0,>=2.1.0; sys_platform == 'win32'
 Requires-Dist: pandas>=2.0.0; sys_platform == 'darwin'
 Requires-Dist: pandas>=2.1.0; sys_platform == 'linux' and python_version >= '3.12'
+Requires-Dist: polars>=0.20.0
 Requires-Dist: pyarrow<13.0.0,>=10.0.0; sys_platform == 'linux' and python_version < '3.12'
 Requires-Dist: pyarrow<15.0.0,>=12.0.0; sys_platform == 'win32'
 Requires-Dist: pyarrow>=12.0.0; sys_platform == 'darwin'
@@ -69,7 +73,7 @@ Description-Content-Type: text/markdown
 ![Python](https://img.shields.io/badge/Python-3.10+-blue.svg)
 ![PyTorch](https://img.shields.io/badge/PyTorch-1.10+-ee4c2c.svg)
 ![License](https://img.shields.io/badge/License-Apache%202.0-green.svg)
-![Version](https://img.shields.io/badge/Version-0.4.34-orange.svg)
+![Version](https://img.shields.io/badge/Version-0.5.1-orange.svg)
 [![Ask DeepWiki](https://deepwiki.com/badge.svg)](https://deepwiki.com/zerolovesea/NextRec)

 Chinese documentation | [English Version](README_en.md)
@@ -102,6 +106,7 @@ NextRec is a modern PyTorch-based recommendation framework that aims to give research and engineering
 - **Efficient training and evaluation**: built-in optimizers, learning-rate schedules, early stopping, model checkpointing, and detailed log management, ready out of the box.

 ## Recent NextRec Progress
+- **28/01/2026** v0.4.39 added support for ONNX export and loading, and substantially sped up data preprocessing (up to 9x faster)
 - **01/01/2026** Happy New Year! v0.4.27 added several multi-task models: [APG](nextrec/models/multi_task/apg.py), [ESCM](nextrec/models/multi_task/escm.py), [HMoE](nextrec/models/multi_task/hmoe.py), [Cross Stitch](nextrec/models/multi_task/cross_stitch.py)
 - **28/12/2025** v0.4.21 added SwanLab and Wandb support, configured through the model's `fit` method: `use_swanlab=True, swanlab_kwargs={"project": "NextRec","name":"tutorial_movielens_deepfm"},`
 - **21/12/2025** v0.4.16 added support for [GradNorm](/nextrec/loss/grad_norm.py), configured via `loss_weight='grad_norm'` in compile
@@ -136,6 +141,7 @@ pip install nextrec # or pip install -e .
 - [example_multitask.py](/tutorials/example_multitask.py) - ESMM multi-task training example on an e-commerce dataset
 - [movielen_match_dssm.py](/tutorials/movielen_match_dssm.py) - DSSM retrieval model example trained on the MovieLens 100k dataset

+- [example_onnx.py](/tutorials/example_onnx.py) - Train a model with NextRec and export it to ONNX
 - [example_distributed_training.py](/tutorials/distributed/example_distributed_training.py) - Single-machine multi-GPU training example with NextRec

 - [run_all_ranking_models.py](/tutorials/run_all_ranking_models.py) - Quickly verify that all ranking models run
@@ -254,11 +260,11 @@ nextrec --mode=predict --predict_config=path/to/predict_config.yaml

 Prediction results are always saved to `{checkpoint_path}/predictions/{name}.{save_data_format}`.

-> As of version 0.4.34, the NextRec CLI supports single-machine training; distributed-training features are still under development.
+> As of version 0.5.1, the NextRec CLI supports single-machine training; distributed-training features are still under development.

 ## Supported Platforms

-The current release is 0.4.34. All models and test code have been verified on the platforms below; if you run into compatibility problems, please open an issue with an error report and your system version:
+The current release is 0.5.1. All models and test code have been verified on the platforms below; if you run into compatibility problems, please open an issue with an error report and your system version:

 | Platform | Configuration |
 |------|------|
@@ -1,24 +1,24 @@
 nextrec/__init__.py,sha256=_M3oUqyuvQ5k8Th_3wId6hQ_caclh7M5ad51XN09m98,235
-nextrec/__version__.py,sha256=lEcP5iYKQN3Lr-tUbNBN0qjKzIg4RjGwnIQfJt42ids,23
-nextrec/cli.py,sha256=ct6tnDRK3_M_dIjCt0W_KpEJdNgXBXY68SvGHsAgwU4,25874
+nextrec/__version__.py,sha256=eZ1bOun1DDVV0YLOBW4wj2FP1ajReLjbIrGmzN7ASBw,22
+nextrec/cli.py,sha256=lISpZYbdyQpKl6rWqm4ENXh4WqSfu-8jlC8jTDt1u1Y,29074
 nextrec/basic/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-nextrec/basic/activation.py,sha256=2A4sgkXzPUsGzBqu9kfGEw3IY2Z3MB22gXh12dAR-U8,2896
+nextrec/basic/activation.py,sha256=rU-W-DHgiD3AZnMGmD014ChxklfP9BpedDTiwtdgXhA,2762
 nextrec/basic/asserts.py,sha256=eaB4FZJ7Sbh9S8PLJZNiYd7Zi98ca2rDi4S7wSYCaEw,1473
 nextrec/basic/callback.py,sha256=H1C7EdkLTRLtPrKwCk1Gwq41m7I6tmtUWEST5Ih9fHI,12648
 nextrec/basic/features.py,sha256=hVbEEtEYFer5OLkqNEc0N0vK9QkkWE9Il8FOW0hejZQ,8528
 nextrec/basic/heads.py,sha256=WqvavaH6Y8Au8dLaoUfH2AaOOWgYvjZI5US8avkQNsQ,4009
-nextrec/basic/layers.py,sha256=ta2tLCERCoWXRzADb5ziGkxA2iftdiSB1bYJhdy55WQ,39526
+nextrec/basic/layers.py,sha256=tawggQMMHlYTGpnubxUAvDPDJe_Lpq-HpLLCSjbJV54,37320
 nextrec/basic/loggers.py,sha256=jJUzUt_kMpjpV2Mqr7qBENWA1olEutTI7dFnpmndUUw,13845
 nextrec/basic/metrics.py,sha256=nVz3AkKwsxj_M97CoZWyQj8_Y9ZM_Icvw_QCM6c33Bc,26262
-nextrec/basic/model.py,sha256=uKq74cZ7OvO6I9fHA3-ztpiD7xw-jevVKJicf-A_M9A,111999
+nextrec/basic/model.py,sha256=x4QZc8lNXHzpOJLeN3qPs9G_kNC1KgKn_2K52v3-vLw,132691
 nextrec/basic/session.py,sha256=mrIsjRJhmvcAfoO1pXX-KB3SK5CCgz89wH8XDoAiGEI,4475
 nextrec/basic/summary.py,sha256=hCDVB8127GSGtlfFnfEFHWXuvW5qjCSTwowNoA1i1xE,19815
 nextrec/data/__init__.py,sha256=YZQjpty1pDCM7q_YNmiA2sa5kbujUw26ObLHWjMPjKY,1194
 nextrec/data/batch_utils.py,sha256=TbnXYqYlmK51dJthaL6dO7LTn4wyp8221I-kdgvpvDE,3542
-nextrec/data/data_processing.py,sha256=lhuwYxWp4Ts2bbuLGDt2LmuPrOy7pNcKczd2uVcQ4ss,6476
+nextrec/data/data_processing.py,sha256=xD6afp4zc217ddKfDtHtToyDpxMDWvoqD_Vk4pIpvXU,6333
 nextrec/data/data_utils.py,sha256=0Ls1cnG9lBz0ovtyedw5vwp7WegGK_iF-F8e_3DEddo,880
 nextrec/data/dataloader.py,sha256=2sXwoiWxupKE-V1QYeZlXjK1yJyxhDtlOhknAnJF8Wk,19727
-nextrec/data/preprocessor.py,sha256=SsXkO5exQsXDZ2gx1Qwc4K58u0rG_K6lev8PbGipV58,68518
+nextrec/data/preprocessor.py,sha256=kOEfPy0t0M3jBA0kPIlwuSQYsuvn8yUNr_uE_NiulHU,49939
 nextrec/loss/__init__.py,sha256=rualGsY-IBvmM52q9eOBk0MyKcMkpkazcscOeDXi_SM,774
 nextrec/loss/grad_norm.py,sha256=I4jAs0f84I7MWmYZOMC0JRUNvBHZzhgpuox0hOtYWDg,7435
 nextrec/loss/listwise.py,sha256=mluxXQt9XiuWGvXA1nk4I0miqaKB6_GPVQqxLhAiJKs,5999
@@ -28,7 +28,6 @@ nextrec/models/generative/__init__.py,sha256=0MV3P-_ainPaTxmRBGWKUVCEt14KJvuvEHm
 nextrec/models/generative/tiger.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 nextrec/models/multi_task/[pre]aitm.py,sha256=A2n0T4JEui-uHgbqITU5lpsmtnP14fQXRZM1peTPvhQ,6661
 nextrec/models/multi_task/[pre]snr_trans.py,sha256=k08tC-TI--a_Tt4_BmX0ZubzntyqwsejutYzbB5F4S4,9077
-nextrec/models/multi_task/[pre]star.py,sha256=BczXHPJtK7xyPbLO0fQ-w7qnzaBvLpyhG0CKMBUItCY,7057
 nextrec/models/multi_task/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 nextrec/models/multi_task/apg.py,sha256=dNveyottpHTd811_3KVZQQqgffDzmX_rY1EMiv1oKeo,13536
 nextrec/models/multi_task/cross_stitch.py,sha256=o48ZZFKWXz-w4MWboVdxDxifM0V3_n2Is2nZ2HoJkfw,9441
@@ -41,15 +40,15 @@ nextrec/models/multi_task/ple.py,sha256=h8xPqd7BFM76GbQL6RzEbi2EAB8igrkFGS4zqhtW
 nextrec/models/multi_task/poso.py,sha256=jr-RaLl5UnZc1HcEIK3HrNnc_g17BImyJb43d4rEXpE,18218
 nextrec/models/multi_task/share_bottom.py,sha256=yM5--iqwEFQwpeQy_SmY8Vdo8a1Exi0-LNKSYeJz3hc,5238
 nextrec/models/ranking/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-nextrec/models/ranking/afm.py,sha256=2IylgCi7DBZzF7bdWFWvsEBlnRmyradxdyJiz5P4Nwg,9397
+nextrec/models/ranking/afm.py,sha256=QPGBNlR9qjS5V5F-A8BnRBPcwwU6qcXQa96cNk9xlsY,9187
 nextrec/models/ranking/autoint.py,sha256=5g0tMtvkt3FiCjqmU76K7w7z-h3nMJbP0eVt5Ph4bbU,7259
 nextrec/models/ranking/dcn.py,sha256=qG3O2oL9ZLu9NBOJST06JeEh0k6UTXs-X1mQe_H4QCE,6679
 nextrec/models/ranking/dcn_v2.py,sha256=807wASeG56WYD7mEDEKaERw3r-Jpas6WhDzO0HGEk9I,10382
 nextrec/models/ranking/deepfm.py,sha256=1fuc-9f8DKDUH7EAY-XX5lITJ7Qw7ge6PTWFfQ7wld8,4404
-nextrec/models/ranking/dien.py,sha256=cluW44zOBHadf3oup6h7MpTexDXjH_VPkYcqsNBT80Y,18361
+nextrec/models/ranking/dien.py,sha256=9OM2vz1Umqlvny6UUySTEz5g_jf8I4creox9J7oV82A,18320
 nextrec/models/ranking/din.py,sha256=J6-S72_KJYLrzUmdrh6aAx-Qc1C08ZY7lY_KFtAkJz0,8855
 nextrec/models/ranking/eulernet.py,sha256=0nOBfccfvukSZLUNOCcB_azCh52DGJq-s9doyEGMN8E,11484
-nextrec/models/ranking/ffm.py,sha256=v15x2-rExcrEYdcPf2IxEgx-ImDSevhkhi4Oe4GbloY,10512
+nextrec/models/ranking/ffm.py,sha256=v8_whymddfY7u0F9rD4VTUQJXnyYYJYvbgwiR7DKuII,10432
 nextrec/models/ranking/fibinet.py,sha256=ejR1vNh5XM23SD7mfT686kuv3cmf5gKfkj0z_iMQqNA,7283
 nextrec/models/ranking/fm.py,sha256=SlFtbtnrZbeRnCHf-kUAMaeLV_wDgLiaBwPGeAO_ycM,3795
 nextrec/models/ranking/lr.py,sha256=0gmqPED-z7k4WVRy11WSLhYfS4bJQPRQTzQbe-rUITg,3227
@@ -63,9 +62,9 @@ nextrec/models/retrieval/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZ
 nextrec/models/retrieval/dssm.py,sha256=AmXDr62M6tivBg1P4MQ8f4cZnl1TGHxRBvZj05zWw64,6887
 nextrec/models/retrieval/dssm_v2.py,sha256=5ZH3dfNfRCDE69k8KG8BZJixaGOSVvQHB9uIDPMLPk4,5953
 nextrec/models/retrieval/mind.py,sha256=I0qVj39ApweRGW3qDNLca5vsNtJwRe7gBLh1pedsexY,14061
-nextrec/models/retrieval/sdm.py,sha256=1Y2gidG7WKuuGFaaQ8BcBGhQYoyyLPyhpRTo_xE1pmc,9987
+nextrec/models/retrieval/sdm.py,sha256=h9TqVmSJ8YF7hgPci784nAlBg1LazB641c4iEeuiLDg,9956
 nextrec/models/retrieval/youtube_dnn.py,sha256=hLyR4liuusJIjRg4vuaSoSEecYgDICipXnNFiA3o3oY,6351
-nextrec/models/sequential/hstu.py,sha256=iZcYLp44r23nHYNhGwD25JfH85DBrFwHOTg1WpHvLe8,18983
+nextrec/models/sequential/hstu.py,sha256=XFq-IERFg2ohqg03HkP6YinQaZUXljtYayUmvU-N_IY,18916
 nextrec/models/tree_base/__init__.py,sha256=ssGpU1viVidr1tgKCyvmkUXe32bUmwb5qPEkTh7ce70,342
 nextrec/models/tree_base/base.py,sha256=HcxUkctNgizY9LOBh2qXY6gUiYoy2JXA7j11NwUfWT4,26562
 nextrec/models/tree_base/catboost.py,sha256=hXINyx7iianwDxOZx3SLm0i-YP1jiC3HcAeqP9A2i4A,3434
@@ -78,10 +77,11 @@ nextrec/utils/data.py,sha256=pSL96mWjWfW_RKE-qlUSs9vfiYnFZAaRirzA6r7DB6s,24994
 nextrec/utils/embedding.py,sha256=akAEc062MG2cD7VIOllHaqtwzAirQR2gq5iW7oKpGAU,1449
 nextrec/utils/loss.py,sha256=GBWQGpDaYkMJySpdG078XbeUNXUC34PVqFy0AqNS9N0,4578
 nextrec/utils/model.py,sha256=PI9y8oWz1lhktgapZsiXb8rTr2NrFFlc80tr4yOFHik,5334
-nextrec/utils/torch_utils.py,sha256=fxViD6Pah0qnXtpvem6ncuLV7y58Q_gyktfvkZQo_JI,12207
+nextrec/utils/onnx_utils.py,sha256=KIVV_ELYzj3kCswfsSBZ1F2OnSwRJnXj7sxDBwBoBaA,8668
+nextrec/utils/torch_utils.py,sha256=_a9e6GXa3QKuu0E5RL44QRZ1iJSobbtNcPB3vtaCsu8,12313
 nextrec/utils/types.py,sha256=LFwYCBRo5WeYUh5LSCuyP1Lg9ez0Ih00Es3fUttGAFw,2273
-nextrec-0.4.34.dist-info/METADATA,sha256=2EpwNICyqgNVBUW2DNSMqzJR741TiCb-SrqZZx2VYtQ,23188
-nextrec-0.4.34.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
-nextrec-0.4.34.dist-info/entry_points.txt,sha256=NN-dNSdfMRTv86bNXM7d3ZEPW2BQC6bRi7QP7i9cIps,45
-nextrec-0.4.34.dist-info/licenses/LICENSE,sha256=COP1BsqnEUwdx6GCkMjxOo5v3pUe4-Go_CdmQmSfYXM,1064
-nextrec-0.4.34.dist-info/RECORD,,
+nextrec-0.5.1.dist-info/METADATA,sha256=DswmHjwCt-xuCenbNsKJgeSMw2LqVOAJFVFjgEn2CRc,23532
+nextrec-0.5.1.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+nextrec-0.5.1.dist-info/entry_points.txt,sha256=NN-dNSdfMRTv86bNXM7d3ZEPW2BQC6bRi7QP7i9cIps,45
+nextrec-0.5.1.dist-info/licenses/LICENSE,sha256=COP1BsqnEUwdx6GCkMjxOo5v3pUe4-Go_CdmQmSfYXM,1064
+nextrec-0.5.1.dist-info/RECORD,,