nextrec 0.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. nextrec/__init__.py +41 -0
  2. nextrec/__version__.py +1 -0
  3. nextrec/basic/__init__.py +0 -0
  4. nextrec/basic/activation.py +92 -0
  5. nextrec/basic/callback.py +35 -0
  6. nextrec/basic/dataloader.py +447 -0
  7. nextrec/basic/features.py +87 -0
  8. nextrec/basic/layers.py +985 -0
  9. nextrec/basic/loggers.py +124 -0
  10. nextrec/basic/metrics.py +557 -0
  11. nextrec/basic/model.py +1438 -0
  12. nextrec/data/__init__.py +27 -0
  13. nextrec/data/data_utils.py +132 -0
  14. nextrec/data/preprocessor.py +662 -0
  15. nextrec/loss/__init__.py +35 -0
  16. nextrec/loss/loss_utils.py +136 -0
  17. nextrec/loss/match_losses.py +294 -0
  18. nextrec/models/generative/hstu.py +0 -0
  19. nextrec/models/generative/tiger.py +0 -0
  20. nextrec/models/match/__init__.py +13 -0
  21. nextrec/models/match/dssm.py +200 -0
  22. nextrec/models/match/dssm_v2.py +162 -0
  23. nextrec/models/match/mind.py +210 -0
  24. nextrec/models/match/sdm.py +253 -0
  25. nextrec/models/match/youtube_dnn.py +172 -0
  26. nextrec/models/multi_task/esmm.py +129 -0
  27. nextrec/models/multi_task/mmoe.py +161 -0
  28. nextrec/models/multi_task/ple.py +260 -0
  29. nextrec/models/multi_task/share_bottom.py +126 -0
  30. nextrec/models/ranking/__init__.py +17 -0
  31. nextrec/models/ranking/afm.py +118 -0
  32. nextrec/models/ranking/autoint.py +140 -0
  33. nextrec/models/ranking/dcn.py +120 -0
  34. nextrec/models/ranking/deepfm.py +95 -0
  35. nextrec/models/ranking/dien.py +214 -0
  36. nextrec/models/ranking/din.py +181 -0
  37. nextrec/models/ranking/fibinet.py +130 -0
  38. nextrec/models/ranking/fm.py +87 -0
  39. nextrec/models/ranking/masknet.py +125 -0
  40. nextrec/models/ranking/pnn.py +128 -0
  41. nextrec/models/ranking/widedeep.py +105 -0
  42. nextrec/models/ranking/xdeepfm.py +117 -0
  43. nextrec/utils/__init__.py +18 -0
  44. nextrec/utils/common.py +14 -0
  45. nextrec/utils/embedding.py +19 -0
  46. nextrec/utils/initializer.py +47 -0
  47. nextrec/utils/optimizer.py +75 -0
  48. nextrec-0.1.1.dist-info/METADATA +302 -0
  49. nextrec-0.1.1.dist-info/RECORD +51 -0
  50. nextrec-0.1.1.dist-info/WHEEL +4 -0
  51. nextrec-0.1.1.dist-info/licenses/LICENSE +21 -0
nextrec/utils/__init__.py
@@ -0,0 +1,18 @@
+ from nextrec.utils.optimizer import get_optimizer_fn, get_scheduler_fn
+ from nextrec.utils.initializer import get_initializer_fn
+ from nextrec.utils.embedding import get_auto_embedding_dim
+ from nextrec.utils.common import get_task_type
+
+ from nextrec.utils import optimizer, initializer, embedding, common
+
+ __all__ = [
+     'get_optimizer_fn',
+     'get_scheduler_fn',
+     'get_initializer_fn',
+     'get_auto_embedding_dim',
+     'get_task_type',
+     'optimizer',
+     'initializer',
+     'embedding',
+     'common',
+ ]
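A quick note on usage (illustrative only, not part of the packaged files): because of the re-exports above, the helpers can be imported either from their submodules or directly from `nextrec.utils`.

```python
# Sketch only: both import paths resolve to the same function object.
from nextrec.utils import get_auto_embedding_dim
from nextrec.utils.embedding import get_auto_embedding_dim as auto_dim

assert get_auto_embedding_dim is auto_dim
```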
nextrec/utils/common.py
@@ -0,0 +1,14 @@
+ """
+ Common utilities for NextRec
+
+ Date: create on 13/11/2025
+ Author:
+     Yang Zhou, zyaztec@gmail.com
+ """
+
+
+ def get_task_type(model) -> str:
+     """
+     Get task type from model.
+     """
+     return model.task_type
nextrec/utils/embedding.py
@@ -0,0 +1,19 @@
+ """
+ Embedding utilities for NextRec
+
+ Date: create on 13/11/2025
+ Author:
+     Yang Zhou, zyaztec@gmail.com
+ """
+
+ import numpy as np
+
+
+ def get_auto_embedding_dim(num_classes: int) -> int:
+     """
+     Calculate the dim of embedding vector according to number of classes in the category.
+     Formula: emb_dim = [6 * (num_classes)^(1/4)]
+     Reference:
+         Deep & Cross Network for Ad Click Predictions.(ADKDD'17)
+     """
+     return int(np.floor(6 * np.power(num_classes, 0.25)))
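As a quick sanity check of the formula above (illustrative only, not part of the packaged file): 10,000 classes gives floor(6 * 10000^0.25) = floor(60.0) = 60, while 1,000 classes gives floor(6 * 1000^0.25) ≈ floor(33.7) = 33.

```python
# Sketch only: worked examples for get_auto_embedding_dim.
from nextrec.utils.embedding import get_auto_embedding_dim

print(get_auto_embedding_dim(10_000))  # 60, since 6 * 10000**0.25 == 60.0
print(get_auto_embedding_dim(1_000))   # 33, since 6 * 1000**0.25 is about 33.74
```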
nextrec/utils/initializer.py
@@ -0,0 +1,47 @@
+ """
+ Initialization utilities for NextRec
+
+ Date: create on 13/11/2025
+ Author:
+     Yang Zhou, zyaztec@gmail.com
+ """
+
+ import torch.nn as nn
+
+
+ def get_initializer_fn(init_type='normal', activation='linear', param=None):
+     """
+     Get parameter initialization function.
+
+     Examples:
+         >>> init_fn = get_initializer_fn('xavier_uniform', 'relu')
+         >>> init_fn(tensor)
+         >>> init_fn = get_initializer_fn('normal', param={'mean': 0.0, 'std': 0.01})
+     """
+     param = param or {}
+
+     try:
+         gain = param.get('gain', nn.init.calculate_gain(activation, param.get('param', None)))
+     except ValueError:
+         gain = 1.0  # for custom activations like 'dice'
+
+     def initializer_fn(tensor):
+         if init_type == 'xavier_uniform':
+             nn.init.xavier_uniform_(tensor, gain=gain)
+         elif init_type == 'xavier_normal':
+             nn.init.xavier_normal_(tensor, gain=gain)
+         elif init_type == 'kaiming_uniform':
+             nn.init.kaiming_uniform_(tensor, a=param.get('a', 0), nonlinearity=activation)
+         elif init_type == 'kaiming_normal':
+             nn.init.kaiming_normal_(tensor, a=param.get('a', 0), nonlinearity=activation)
+         elif init_type == 'orthogonal':
+             nn.init.orthogonal_(tensor, gain=gain)
+         elif init_type == 'normal':
+             nn.init.normal_(tensor, mean=param.get('mean', 0.0), std=param.get('std', 0.0001))
+         elif init_type == 'uniform':
+             nn.init.uniform_(tensor, a=param.get('a', -0.05), b=param.get('b', 0.05))
+         else:
+             raise ValueError(f"Unknown init_type: {init_type}")
+         return tensor
+
+     return initializer_fn
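A minimal usage sketch for the factory above (illustrative only, not part of the packaged file): build an initializer and apply it to a layer's weight tensor.

```python
# Sketch only: initialize a linear layer with the factory defined above.
import torch.nn as nn
from nextrec.utils.initializer import get_initializer_fn

layer = nn.Linear(16, 8)
init_fn = get_initializer_fn(init_type='xavier_uniform', activation='relu')
init_fn(layer.weight)       # in-place Xavier-uniform init, gain computed for ReLU
nn.init.zeros_(layer.bias)  # biases are handled separately
```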
nextrec/utils/optimizer.py
@@ -0,0 +1,75 @@
+ """
+ Optimizer and Scheduler utilities for NextRec
+
+ Date: create on 13/11/2025
+ Author:
+     Yang Zhou, zyaztec@gmail.com
+ """
+
+ import torch
+ from typing import Iterable
+
+
+ def get_optimizer_fn(
+     optimizer: str = "adam",
+     params: Iterable[torch.nn.Parameter] | None = None,
+     **optimizer_params
+ ):
+     """
+     Get optimizer function based on optimizer name or instance.
+
+     Examples:
+         >>> optimizer = get_optimizer_fn("adam", model.parameters(), lr=1e-3)
+         >>> optimizer = get_optimizer_fn("sgd", model.parameters(), lr=0.01, momentum=0.9)
+     """
+     if params is None:
+         raise ValueError("params cannot be None. Please provide model parameters.")
+
+     if 'lr' not in optimizer_params:
+         optimizer_params['lr'] = 1e-3
+
+     if isinstance(optimizer, str):
+         opt_name = optimizer.lower()
+         if opt_name == "adam":
+             opt_class = torch.optim.Adam
+         elif opt_name == "sgd":
+             opt_class = torch.optim.SGD
+         elif opt_name == "adamw":
+             opt_class = torch.optim.AdamW
+         elif opt_name == "adagrad":
+             opt_class = torch.optim.Adagrad
+         elif opt_name == "rmsprop":
+             opt_class = torch.optim.RMSprop
+         else:
+             raise NotImplementedError(f"Unsupported optimizer: {optimizer}")
+         optimizer_fn = opt_class(params=params, **optimizer_params)
+
+     elif isinstance(optimizer, torch.optim.Optimizer):
+         optimizer_fn = optimizer
+     else:
+         raise TypeError(f"Invalid optimizer type: {type(optimizer)}")
+
+     return optimizer_fn
+
+
+ def get_scheduler_fn(scheduler, optimizer, **scheduler_params):
+     """
+     Get learning rate scheduler function.
+
+     Examples:
+         >>> scheduler = get_scheduler_fn("step", optimizer, step_size=10, gamma=0.1)
+         >>> scheduler = get_scheduler_fn("cosine", optimizer, T_max=100)
+     """
+     if isinstance(scheduler, str):
+         if scheduler == "step":
+             scheduler_fn = torch.optim.lr_scheduler.StepLR(optimizer, **scheduler_params)
+         elif scheduler == "cosine":
+             scheduler_fn = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, **scheduler_params)
+         else:
+             raise NotImplementedError(f"Unsupported scheduler: {scheduler}")
+     elif isinstance(scheduler, torch.optim.lr_scheduler._LRScheduler):
+         scheduler_fn = scheduler
+     else:
+         raise TypeError(f"Invalid scheduler type: {type(scheduler)}")
+
+     return scheduler_fn
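A short sketch tying both helpers together (illustrative only, not part of the packaged file): build an optimizer from a name and attach a scheduler to it.

```python
# Sketch only: wire get_optimizer_fn and get_scheduler_fn into a toy model.
import torch.nn as nn
from nextrec.utils.optimizer import get_optimizer_fn, get_scheduler_fn

model = nn.Linear(10, 1)
optimizer = get_optimizer_fn("adamw", model.parameters(), lr=1e-3, weight_decay=1e-5)
scheduler = get_scheduler_fn("cosine", optimizer, T_max=100)

for epoch in range(3):
    # ... run training batches here: forward pass, loss.backward(), optimizer.step() ...
    scheduler.step()  # advance the cosine schedule once per epoch
```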
nextrec-0.1.1.dist-info/METADATA
@@ -0,0 +1,302 @@
+ Metadata-Version: 2.4
+ Name: nextrec
+ Version: 0.1.1
+ Summary: A comprehensive recommendation library with match, ranking, and multi-task learning models
+ Project-URL: Homepage, https://github.com/zerolovesea/NextRec
+ Project-URL: Repository, https://github.com/zerolovesea/NextRec
+ Project-URL: Documentation, https://github.com/zerolovesea/NextRec/blob/main/README.md
+ Project-URL: Issues, https://github.com/zerolovesea/NextRec/issues
+ Author-email: zerolovesea <zyaztec@gmail.com>
+ License-File: LICENSE
+ Keywords: ctr,deep-learning,match,pytorch,ranking,recommendation
+ Classifier: Development Status :: 3 - Alpha
+ Classifier: Intended Audience :: Developers
+ Classifier: Intended Audience :: Science/Research
+ Classifier: License :: OSI Approved :: Apache Software License
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+ Requires-Python: >=3.10
+ Requires-Dist: fastparquet>=2023.4.0
+ Requires-Dist: numpy>=1.24.0
+ Requires-Dist: pandas>=2.0.0
+ Requires-Dist: pyarrow>=12.0.0
+ Requires-Dist: scikit-learn>=1.3.0
+ Requires-Dist: scipy>=1.10.0
+ Requires-Dist: torch>=2.0.0
+ Requires-Dist: torchvision>=0.15.0
+ Requires-Dist: tqdm>=4.65.0
+ Provides-Extra: dev
+ Requires-Dist: jupyter>=1.0.0; extra == 'dev'
+ Requires-Dist: matplotlib>=3.7.0; extra == 'dev'
+ Requires-Dist: pytest-cov>=4.1.0; extra == 'dev'
+ Requires-Dist: pytest-html>=3.2.0; extra == 'dev'
+ Requires-Dist: pytest-mock>=3.11.0; extra == 'dev'
+ Requires-Dist: pytest-timeout>=2.1.0; extra == 'dev'
+ Requires-Dist: pytest-xdist>=3.3.0; extra == 'dev'
+ Requires-Dist: pytest>=7.4.0; extra == 'dev'
+ Requires-Dist: seaborn>=0.12.0; extra == 'dev'
+ Description-Content-Type: text/markdown
+
+ # NextRec
+
+ <div align="center">
+
+ ![Python](https://img.shields.io/badge/Python-3.10+-blue.svg)
+ ![PyTorch](https://img.shields.io/badge/PyTorch-1.10+-ee4c2c.svg)
+ ![License](https://img.shields.io/badge/License-Apache%202.0-green.svg)
+ ![Version](https://img.shields.io/badge/Version-0.1.0-orange.svg)
+
+ [中文版](README_zh.md)
+
+ **A Unified, Efficient, and Scalable Recommendation System Framework**
+
+ </div>
+
+ ## Introduction
+
+ NextRec is a modern recommendation system framework built on PyTorch, providing a unified modeling, training, and evaluation experience for researchers and engineering teams. The framework adopts a modular design with rich built-in model implementations, data-processing tools, and production-ready training components, enabling quick coverage of multiple recommendation scenarios.
+
+ > This project draws on several open-source recommendation libraries, with the general layers referencing the mature implementations in [torch-rechub](https://github.com/datawhalechina/torch-rechub). This part of the code is still at an early stage and is being gradually replaced with our own implementations. If you find any bugs, please report them in the issue tracker. Contributions are welcome.
+
+ ### Key Features
+
+ - **Multi-scenario Recommendation**: Supports ranking (CTR/CVR), retrieval, multi-task learning, and generative recommendation models such as TIGER and HSTU — with more models continuously added.
+ - **Unified Feature Engineering & Data Pipeline**: Provides Dense/Sparse/Sequence feature definitions, persistent DataProcessor, and optimized RecDataLoader, forming a complete “Define → Process → Load” workflow.
+ - **Efficient Training & Evaluation**: A standardized training engine with optimizers, LR schedulers, early stopping, checkpoints, and logging — ready out-of-the-box.
+ - **Developer-friendly Engineering Experience**: Modular and extensible design, full tutorial support, GPU/MPS acceleration, and visualization tools.
+
+ ---
+
+ ## Installation
+
+ NextRec supports installation via **UV** or traditional **pip/source installation**.
+
+ ### Option 1: Using UV (Recommended)
+
+ UV is a modern, high-performance Python package manager offering fast dependency resolution and installation.
+
+ ```bash
+ git clone https://github.com/zerolovesea/NextRec.git
+ cd NextRec
+
+ # Install UV if not already installed
+ pip install uv
+
+ # Create virtual environment and install dependencies
+ uv sync
+
+ # Activate the virtual environment
+ source .venv/bin/activate # macOS/Linux
+ # or
+ .venv\Scripts\activate # Windows
+
+ # Install the package in editable mode
+ uv pip install -e .
+ ```
+
+ **Note**: Make sure to deactivate any other conda/virtual environments before running `uv sync` to avoid environment conflicts.
+
+ ### Option 2: Using pip/source installation
+
+ ```bash
+ git clone https://github.com/zerolovesea/NextRec.git
+ cd NextRec
+
+ # Install dependencies
+ pip install -r requirements.txt
+ pip install -r test_requirements.txt
+
+ # Install the package in editable mode
+ pip install -e .
+ ```
+
+ ---
+
+ ## 5-Minute Quick Start
+
+ The following example demonstrates a full DeepFM training & inference pipeline using the MovieLens dataset:
+
+ ```python
+ import pandas as pd
+
+ from nextrec.models.ranking.deepfm import DeepFM
+ from nextrec.basic.features import DenseFeature, SparseFeature, SequenceFeature
+
+ df = pd.read_csv("dataset/movielens_100k.csv")
+
+ target = 'label'
+ dense_features = [DenseFeature('age')]
+ sparse_features = [
+     SparseFeature('user_id', vocab_size=df['user_id'].max()+1, embedding_dim=4),
+     SparseFeature('item_id', vocab_size=df['item_id'].max()+1, embedding_dim=4),
+ ]
+
+ sparse_features.append(SparseFeature('gender', vocab_size=df['gender'].max()+1, embedding_dim=4))
+ sparse_features.append(SparseFeature('occupation', vocab_size=df['occupation'].max()+1, embedding_dim=4))
+
+ model = DeepFM(
+     dense_features=dense_features,
+     sparse_features=sparse_features,
+     mlp_params={"dims": [256, 128], "activation": "relu", "dropout": 0.5},
+     target=target,
+     device='cpu',
+     model_id="deepfm_with_processor",
+     embedding_l1_reg=1e-6,
+     dense_l1_reg=1e-5,
+     embedding_l2_reg=1e-5,
+     dense_l2_reg=1e-4,
+ )
+
+ model.compile(optimizer="adam", optimizer_params={"lr": 1e-3, "weight_decay": 1e-5}, loss="bce")
+ model.fit(train_data=df, metrics=['auc', 'recall', 'precision'], epochs=10, batch_size=512, shuffle=True, verbose=1)
+ preds = model.predict(df)
+ print(f'preds: {preds}')
+ ```
+
+ ### More Tutorials
+
+ The `tutorials/` directory provides examples for ranking, retrieval, multi-task learning, and data processing:
+
+ - `movielen_match_dssm.py` — DSSM retrieval on MovieLens 100k
+ - `movielen_ranking_deepfm.py` — DeepFM ranking on MovieLens 100k
+ - `example_ranking_din.py` — DIN (Deep Interest Network) example
+ - `example_match_dssm.py` — DSSM retrieval example
+ - `example_multitask.py` — Multi-task learning example
+
+ ---
+
+ ## Data Processing Example
+
+ NextRec offers a unified interface for preprocessing sparse and sequence features:
+
+ ```python
+ import pandas as pd
+ from nextrec.data.preprocessor import DataProcessor
+
+ df = pd.read_csv("dataset/movielens_100k.csv")
+
+ processor = DataProcessor()
+ processor.add_sparse_feature('movie_title', encode_method='hash', hash_size=1000)
+ processor.fit(df)
+
+ df = processor.transform(df, return_dict=False)
+
+ print("\nSample training data:")
+ print(df.head())
+ ```
+
+ ---
+
+ ## Supported Models
+
+ ### Ranking Models
+
+ | Model | Paper | Year | Status |
+ |-------|-------|------|--------|
+ | **FM** | Factorization Machines | ICDM 2010 | Supported |
+ | **AFM** | Attentional Factorization Machines: Learning the Weight of Feature Interactions via Attention Networks | IJCAI 2017 | Supported |
+ | **DeepFM** | DeepFM: A Factorization-Machine based Neural Network for CTR Prediction | IJCAI 2017 | Supported |
+ | **Wide&Deep** | Wide & Deep Learning for Recommender Systems | DLRS 2016 | Supported |
+ | **xDeepFM** | xDeepFM: Combining Explicit and Implicit Feature Interactions | KDD 2018 | Supported |
+ | **FiBiNET** | FiBiNET: Combining Feature Importance and Bilinear Feature Interaction for CTR Prediction | RecSys 2019 | Supported |
+ | **PNN** | Product-based Neural Networks for User Response Prediction | ICDM 2016 | Supported |
+ | **AutoInt** | AutoInt: Automatic Feature Interaction Learning | CIKM 2019 | Supported |
+ | **DCN** | Deep & Cross Network for Ad Click Predictions | ADKDD 2017 | Supported |
+ | **DIN** | Deep Interest Network for CTR Prediction | KDD 2018 | Supported |
+ | **DIEN** | Deep Interest Evolution Network | AAAI 2019 | Supported |
+ | **MaskNet** | MaskNet: Feature-wise Gating Blocks for High-dimensional Sparse Recommendation Data | 2020 | Supported |
+
+ ### Retrieval Models
+
+ | Model | Paper | Year | Status |
+ |-------|-------|------|--------|
+ | **DSSM** | Learning Deep Structured Semantic Models | CIKM 2013 | Supported |
+ | **DSSM v2** | DSSM with pairwise BPR-style optimization | - | Supported |
+ | **YouTube DNN** | Deep Neural Networks for YouTube Recommendations | RecSys 2016 | Supported |
+ | **MIND** | Multi-Interest Network with Dynamic Routing | CIKM 2019 | Supported |
+ | **SDM** | Sequential Deep Matching Model | - | Supported |
+
+ ### Multi-task Models
+
+ | Model | Paper | Year | Status |
+ |-------|-------|------|--------|
+ | **MMOE** | Modeling Task Relationships in Multi-task Learning | KDD 2018 | Supported |
+ | **PLE** | Progressive Layered Extraction | RecSys 2020 | Supported |
+ | **ESMM** | Entire Space Multi-task Model | SIGIR 2018 | Supported |
+ | **ShareBottom** | Multitask Learning | - | Supported |
+
+ ### Generative Models
+
+ | Model | Paper | Year | Status |
+ |-------|-------|------|--------|
+ | **TIGER** | Recommender Systems with Generative Retrieval | NeurIPS 2023 | In Progress |
+ | **HSTU** | Hierarchical Sequential Transduction Units | - | In Progress |
+
+ ---
+
+ ## Contributing
+
+ We welcome contributions of any form!
+
+ ### How to Contribute
+
+ 1. Fork the repository
+ 2. Create your feature branch (`git checkout -b feature/AmazingFeature`)
+ 3. Commit your changes (`git commit -m 'Add AmazingFeature'`)
+ 4. Push your branch (`git push origin feature/AmazingFeature`)
+ 5. Open a Pull Request
+
+ > Before submitting a PR, please run tests using `pytest test/ -v` or `python -m pytest` to ensure everything passes.
+
+ ### Code Style
+
+ - Follow PEP8
+ - Provide unit tests for new functionality
+ - Update documentation accordingly
+
+ ### Reporting Issues
+
+ When submitting issues on GitHub, please include:
+
+ - Description of the problem
+ - Reproduction steps
+ - Expected behavior
+ - Actual behavior
+ - Environment info (Python version, PyTorch version, etc.)
+
+ ---
+
+ ## License
+
+ This project is licensed under the [Apache 2.0 License](./LICENSE).
+
+ ---
+
+ ## Contact
+
+ - **GitHub Issues**: Submit issues on GitHub
+ - **Email**: zyaztec@gmail.com
+
+ ---
+
+ ## Acknowledgements
+
+ NextRec is inspired by the following great open-source projects:
+
+ - **torch-rechub** - A Lighting Pytorch Framework for Recommendation Models, Easy-to-use and Easy-to-extend.
+ - **FuxiCTR** — Configurable and reproducible CTR prediction library
+ - **RecBole** — Unified and efficient recommendation library
+ - **PaddleRec** — Large-scale recommendation algorithm library
+
+ Special thanks to all open-source contributors!
+
+ ---
+
+ <div align="center">
+
+ **[Back to Top](#nextrec)**
+
+ </div>
nextrec-0.1.1.dist-info/RECORD
@@ -0,0 +1,51 @@
+ nextrec/__init__.py,sha256=CvocnY2uBp0cjNkhrT6ogw0q2bN9s1GNp754FLO-7lo,1117
+ nextrec/__version__.py,sha256=rnObPjuBcEStqSO0S6gsdS_ot8ITOQjVj_-P1LUUYpg,22
+ nextrec/basic/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ nextrec/basic/activation.py,sha256=XJDTFzmacpLq8DMNbFVhZ3WhlOmKDE88vp0udnVpXtE,2808
+ nextrec/basic/callback.py,sha256=c0QeolbPJzCYhJnPf9rrZwFU13zmLxg59nvQGbpetNo,1039
+ nextrec/basic/dataloader.py,sha256=roG1a7VRPpdy9XBv0rJg7wz00ggok9WNHU_EoDIxY2c,18898
+ nextrec/basic/features.py,sha256=wJbiDqE_qWA5gArUm-NYHaLgk7AMxpA7loaovf84dSU,2526
+ nextrec/basic/layers.py,sha256=dvMir_0PJQfZv0uCUeqyiJpb-QOz0f2CUu2Cuuxh7iA,38300
+ nextrec/basic/loggers.py,sha256=0fupxPiHrKcBEJTBm0Sjcim0rU-n0gYKuy6IiCYX1Bw,3480
+ nextrec/basic/metrics.py,sha256=p79-IRRprLcXjjicrG41vM0zwRGtUY5tTPoybpvz-io,20402
+ nextrec/basic/model.py,sha256=Z6U4p5i-lNY0ypZWoR3PAcQc1d3XyiEAasUl6Z3AQf4,65859
+ nextrec/data/__init__.py,sha256=vvBNAdHcVO54aaaT-SyYHWsPHhoH8GvrlZ2hMRjqyF8,524
+ nextrec/data/data_utils.py,sha256=rpcj5CIWw8RlLn1NYva_gEOlpYG1cy65rB1BSv23XAM,4113
+ nextrec/data/preprocessor.py,sha256=0gYc_nH6ek3QxgncSZ8B8KyYmIYdCFMx9rSEdo4-aFw,26442
+ nextrec/loss/__init__.py,sha256=kBanUB5rxQKwXTd6f-2hOI_CF7cp_MClAwAeVXIkpig,647
+ nextrec/loss/loss_utils.py,sha256=3zeeLBG4lNIXCO94jx-BYlSHl14t-U7L06dQuzVSPJ8,4752
+ nextrec/loss/match_losses.py,sha256=BaH4GKVSFU_PNhHPP_JuAM5zwjOIPxcbuNLYpK0-EWA,11652
+ nextrec/models/generative/hstu.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ nextrec/models/generative/tiger.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ nextrec/models/match/__init__.py,sha256=ASZB5abqKPhDbk8NErNNNa0DHuWpsVxvUtyEn5XMx6Y,215
+ nextrec/models/match/dssm.py,sha256=rl-8-4pG5_DgxM0VYZuPzLP1lhvDF1BbQQoqxBMMqYw,7702
+ nextrec/models/match/dssm_v2.py,sha256=eyWrCo8g8y-e_fq5168iTA_xuHdYWBT9v96GaPor6-o,6407
+ nextrec/models/match/mind.py,sha256=5g7b-VOShPQ9D4FG-1z8exKYFLJS_z_Lt2bvU_qSC98,8735
+ nextrec/models/match/sdm.py,sha256=rJK49438-49JvzNQX2Vi6Zn1sn9twjyOb2YG2lVaGlc,10238
+ nextrec/models/match/youtube_dnn.py,sha256=Su5kwrHGRXrv_4psXZgr9hXpUF5bdosXqdmtHb5J2Vs,6834
+ nextrec/models/multi_task/esmm.py,sha256=0dn8pJ7BAQh5oqpNZISSiTb6sgXddsG99eOdpQVMSTU,4817
+ nextrec/models/multi_task/mmoe.py,sha256=vly9c8e-Xc_m9AjWUmTGtidf67bjiHPwwbAFbXc0XpM,6099
+ nextrec/models/multi_task/ple.py,sha256=mM8shre3BX-sg_peokMh35_-wQAMG5UI2eUfhyRzTgs,11269
+ nextrec/models/multi_task/share_bottom.py,sha256=MzShugQya1rSovhbvmTDD4Uf1MRCGfgIKqKXVsz0RTo,4451
+ nextrec/models/ranking/__init__.py,sha256=GMn3R5TkE9W17dzHuQoASJsQCoM_EIHuUhnMS2jMdZw,291
+ nextrec/models/ranking/afm.py,sha256=BZvGyJZ9aAoL3P8ebsMoQ9HqX2UyKkFdktfz3_VMalA,4483
+ nextrec/models/ranking/autoint.py,sha256=D9jeEP0w-IssbporOIPzTzi6PveiYVcgN7D6AXYxyLc,5580
+ nextrec/models/ranking/dcn.py,sha256=HyXXzooS1zqOWU6MAPi6tBdmDs4o64HP9vBV5fYdKO4,4134
+ nextrec/models/ranking/deepfm.py,sha256=Yl95d4r0dytcZSn4A8ukgxOQ8eaF0t5MqDd9KPfkdPI,3453
+ nextrec/models/ranking/dien.py,sha256=2maimf_c6L-I0JpJNbmpIjbMV8uCndrdFiqvjwxMaj8,8401
+ nextrec/models/ranking/din.py,sha256=Qs4IxfvCmT2lGtZ6BvgdzMoT0lCy88yaXE1FecaMo2c,7122
+ nextrec/models/ranking/fibinet.py,sha256=h6a738bo3VikKHKZhOzk_p9YGNs7hWcpEOkJvOMDR88,4779
+ nextrec/models/ranking/fm.py,sha256=WsbQV8RUc2O7b66GRZicNWaWOtin_QLO8e_Skjk5aIY,2887
+ nextrec/models/ranking/masknet.py,sha256=ADki3oMR7PwWgcf5GhIUQJxto-gFNmIlU-GRsdi04Jk,4565
+ nextrec/models/ranking/pnn.py,sha256=ZhsUh-O_kLJLfK28dp81DMGYnzMkO-L86CgESlT2TB0,4883
+ nextrec/models/ranking/widedeep.py,sha256=7EylqHFaxrclRr-PVhKRxBLOOf8E5-AJbWfJqZpdzy0,3642
+ nextrec/models/ranking/xdeepfm.py,sha256=p2PrQHxmvABdQl1wLnP5VyRy5Chdp7Xcw1FJw7m1LFY,4200
+ nextrec/utils/__init__.py,sha256=-wyEzZrYQ9QL5zPbWdBIWzg-HbT-2wmmbH2Kceuzlzk,510
+ nextrec/utils/common.py,sha256=-LrRY1MFAhgeyZkKyqdVQGxev6eH3gigNtlRKw5f8Iw,214
+ nextrec/utils/embedding.py,sha256=Xl5bXAdxdGc0FV3FthNqJe9MP0M_rZI1uaOlPi3vLj8,478
+ nextrec/utils/initializer.py,sha256=ka5sgXWqAb9x5hQS6ypgonR93OUajBVUAwO7q-JPjIE,1660
+ nextrec/utils/optimizer.py,sha256=g9IETUdflM89YKSzInP_iS_hTnDy_cjpm6Wcq9V9_vE,2468
+ nextrec-0.1.1.dist-info/METADATA,sha256=AvRHx-l50RENWL-w5EA2xSwNEmtxF1mqz1XHddfGSKY,11113
+ nextrec-0.1.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ nextrec-0.1.1.dist-info/licenses/LICENSE,sha256=2fQfVKeafywkni7MYHyClC6RGGC3laLTXCNBx-ubtp0,1064
+ nextrec-0.1.1.dist-info/RECORD,,
nextrec-0.1.1.dist-info/WHEEL
@@ -0,0 +1,4 @@
+ Wheel-Version: 1.0
+ Generator: hatchling 1.27.0
+ Root-Is-Purelib: true
+ Tag: py3-none-any
nextrec-0.1.1.dist-info/licenses/LICENSE
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2025 NextRec
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.