nextrec 0.2.7__tar.gz → 0.3.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (126) hide show
  1. nextrec-0.3.2/PKG-INFO +312 -0
  2. nextrec-0.3.2/README.md +256 -0
  3. nextrec-0.3.2/README_zh.md +253 -0
  4. nextrec-0.3.2/asserts/Feature Configuration.png +0 -0
  5. nextrec-0.3.2/asserts/Model Parameters.png +0 -0
  6. nextrec-0.3.2/asserts/Training Configuration.png +0 -0
  7. nextrec-0.3.2/asserts/Training logs.png +0 -0
  8. nextrec-0.3.2/asserts/logo.png +0 -0
  9. nextrec-0.3.2/asserts/mmoe_tutorial.png +0 -0
  10. nextrec-0.3.2/asserts/nextrec_diagram_en.png +0 -0
  11. nextrec-0.3.2/asserts/nextrec_diagram_zh.png +0 -0
  12. nextrec-0.3.2/asserts/test data.png +0 -0
  13. nextrec-0.3.2/docs/en/Getting started guide.md +105 -0
  14. {nextrec-0.2.7 → nextrec-0.3.2}/docs/rtd/conf.py +1 -1
  15. nextrec-0.3.2/docs/zh//345/277/253/351/200/237/344/270/212/346/211/213.md +105 -0
  16. nextrec-0.3.2/nextrec/__version__.py +1 -0
  17. {nextrec-0.2.7 → nextrec-0.3.2}/nextrec/basic/activation.py +4 -8
  18. {nextrec-0.2.7 → nextrec-0.3.2}/nextrec/basic/callback.py +1 -1
  19. {nextrec-0.2.7 → nextrec-0.3.2}/nextrec/basic/features.py +33 -25
  20. nextrec-0.3.2/nextrec/basic/layers.py +543 -0
  21. {nextrec-0.2.7 → nextrec-0.3.2}/nextrec/basic/loggers.py +4 -5
  22. {nextrec-0.2.7 → nextrec-0.3.2}/nextrec/basic/metrics.py +39 -115
  23. {nextrec-0.2.7 → nextrec-0.3.2}/nextrec/basic/model.py +257 -177
  24. {nextrec-0.2.7 → nextrec-0.3.2}/nextrec/basic/session.py +1 -5
  25. {nextrec-0.2.7 → nextrec-0.3.2}/nextrec/data/__init__.py +12 -0
  26. {nextrec-0.2.7 → nextrec-0.3.2}/nextrec/data/data_utils.py +3 -27
  27. {nextrec-0.2.7 → nextrec-0.3.2}/nextrec/data/dataloader.py +26 -34
  28. {nextrec-0.2.7 → nextrec-0.3.2}/nextrec/data/preprocessor.py +2 -1
  29. {nextrec-0.2.7 → nextrec-0.3.2}/nextrec/loss/listwise.py +6 -4
  30. {nextrec-0.2.7 → nextrec-0.3.2}/nextrec/loss/loss_utils.py +10 -6
  31. {nextrec-0.2.7 → nextrec-0.3.2}/nextrec/loss/pairwise.py +5 -3
  32. {nextrec-0.2.7 → nextrec-0.3.2}/nextrec/loss/pointwise.py +7 -13
  33. nextrec-0.3.2/nextrec/models/generative/__init__.py +5 -0
  34. nextrec-0.3.2/nextrec/models/generative/hstu.py +399 -0
  35. {nextrec-0.2.7 → nextrec-0.3.2}/nextrec/models/match/mind.py +110 -1
  36. {nextrec-0.2.7 → nextrec-0.3.2}/nextrec/models/multi_task/esmm.py +46 -27
  37. {nextrec-0.2.7 → nextrec-0.3.2}/nextrec/models/multi_task/mmoe.py +48 -30
  38. nextrec-0.3.2/nextrec/models/multi_task/ple.py +275 -0
  39. nextrec-0.3.2/nextrec/models/multi_task/poso.py +413 -0
  40. {nextrec-0.2.7 → nextrec-0.3.2}/nextrec/models/multi_task/share_bottom.py +43 -26
  41. {nextrec-0.2.7 → nextrec-0.3.2}/nextrec/models/ranking/__init__.py +2 -0
  42. {nextrec-0.2.7 → nextrec-0.3.2}/nextrec/models/ranking/dcn.py +20 -1
  43. nextrec-0.3.2/nextrec/models/ranking/dcn_v2.py +84 -0
  44. {nextrec-0.2.7 → nextrec-0.3.2}/nextrec/models/ranking/deepfm.py +44 -18
  45. {nextrec-0.2.7 → nextrec-0.3.2}/nextrec/models/ranking/dien.py +130 -27
  46. {nextrec-0.2.7 → nextrec-0.3.2}/nextrec/models/ranking/masknet.py +13 -67
  47. {nextrec-0.2.7 → nextrec-0.3.2}/nextrec/models/ranking/widedeep.py +39 -18
  48. {nextrec-0.2.7 → nextrec-0.3.2}/nextrec/models/ranking/xdeepfm.py +34 -1
  49. nextrec-0.3.2/nextrec/utils/common.py +41 -0
  50. {nextrec-0.2.7 → nextrec-0.3.2}/nextrec/utils/optimizer.py +7 -3
  51. {nextrec-0.2.7 → nextrec-0.3.2}/pyproject.toml +1 -1
  52. {nextrec-0.2.7 → nextrec-0.3.2}/test/test_layers.py +3 -3
  53. {nextrec-0.2.7 → nextrec-0.3.2}/tutorials/example_match_dssm.py +0 -2
  54. {nextrec-0.2.7 → nextrec-0.3.2}/tutorials/example_multitask.py +25 -31
  55. {nextrec-0.2.7 → nextrec-0.3.2}/tutorials/example_ranking_din.py +29 -64
  56. nextrec-0.3.2/tutorials/movielen_match_dssm.py +121 -0
  57. nextrec-0.3.2/tutorials/movielen_ranking_deepfm.py +60 -0
  58. nextrec-0.3.2/tutorials/notebooks/en/Hands on dataprocessor.ipynb +850 -0
  59. nextrec-0.3.2/tutorials/notebooks/en/Hands on nextrec.ipynb +1652 -0
  60. nextrec-0.3.2/tutorials/notebooks/zh/Hands on dataprocessor.ipynb +850 -0
  61. {nextrec-0.2.7 → nextrec-0.3.2}/tutorials/notebooks/zh/Hands on nextrec.ipynb +1 -1
  62. nextrec-0.2.7/PKG-INFO +0 -281
  63. nextrec-0.2.7/README.md +0 -225
  64. nextrec-0.2.7/README_zh.md +0 -222
  65. nextrec-0.2.7/docs/zh//345/277/253/351/200/237/344/270/212/346/211/213.md +0 -97
  66. nextrec-0.2.7/nextrec/__version__.py +0 -1
  67. nextrec-0.2.7/nextrec/basic/layers.py +0 -980
  68. nextrec-0.2.7/nextrec/models/generative/hstu.py +0 -0
  69. nextrec-0.2.7/nextrec/models/multi_task/ple.py +0 -260
  70. nextrec-0.2.7/nextrec/utils/common.py +0 -16
  71. nextrec-0.2.7/tutorials/movielen_match_dssm.py +0 -133
  72. nextrec-0.2.7/tutorials/movielen_ranking_deepfm.py +0 -66
  73. nextrec-0.2.7/tutorials/notebooks/zh/Hands on dataprocessor.ipynb +0 -1368
  74. {nextrec-0.2.7 → nextrec-0.3.2}/.github/workflows/publish.yml +0 -0
  75. {nextrec-0.2.7 → nextrec-0.3.2}/.github/workflows/tests.yml +0 -0
  76. {nextrec-0.2.7 → nextrec-0.3.2}/.gitignore +0 -0
  77. {nextrec-0.2.7 → nextrec-0.3.2}/.readthedocs.yaml +0 -0
  78. {nextrec-0.2.7 → nextrec-0.3.2}/CODE_OF_CONDUCT.md +0 -0
  79. {nextrec-0.2.7 → nextrec-0.3.2}/CONTRIBUTING.md +0 -0
  80. {nextrec-0.2.7 → nextrec-0.3.2}/LICENSE +0 -0
  81. {nextrec-0.2.7 → nextrec-0.3.2}/MANIFEST.in +0 -0
  82. {nextrec-0.2.7 → nextrec-0.3.2}/dataset/ctcvr_task.csv +0 -0
  83. {nextrec-0.2.7 → nextrec-0.3.2}/dataset/match_task.csv +0 -0
  84. {nextrec-0.2.7 → nextrec-0.3.2}/dataset/movielens_100k.csv +0 -0
  85. {nextrec-0.2.7 → nextrec-0.3.2}/dataset/multitask_task.csv +0 -0
  86. {nextrec-0.2.7 → nextrec-0.3.2}/dataset/ranking_task.csv +0 -0
  87. {nextrec-0.2.7 → nextrec-0.3.2}/docs/rtd/Makefile +0 -0
  88. {nextrec-0.2.7 → nextrec-0.3.2}/docs/rtd/index.md +0 -0
  89. {nextrec-0.2.7 → nextrec-0.3.2}/docs/rtd/make.bat +0 -0
  90. {nextrec-0.2.7 → nextrec-0.3.2}/docs/rtd/modules.rst +0 -0
  91. {nextrec-0.2.7 → nextrec-0.3.2}/docs/rtd/nextrec.basic.rst +0 -0
  92. {nextrec-0.2.7 → nextrec-0.3.2}/docs/rtd/nextrec.data.rst +0 -0
  93. {nextrec-0.2.7 → nextrec-0.3.2}/docs/rtd/nextrec.loss.rst +0 -0
  94. {nextrec-0.2.7 → nextrec-0.3.2}/docs/rtd/nextrec.rst +0 -0
  95. {nextrec-0.2.7 → nextrec-0.3.2}/docs/rtd/nextrec.utils.rst +0 -0
  96. {nextrec-0.2.7 → nextrec-0.3.2}/docs/rtd/requirements.txt +0 -0
  97. {nextrec-0.2.7 → nextrec-0.3.2}/nextrec/__init__.py +0 -0
  98. {nextrec-0.2.7 → nextrec-0.3.2}/nextrec/basic/__init__.py +0 -0
  99. {nextrec-0.2.7 → nextrec-0.3.2}/nextrec/loss/__init__.py +0 -0
  100. {nextrec-0.2.7 → nextrec-0.3.2}/nextrec/models/generative/tiger.py +0 -0
  101. {nextrec-0.2.7 → nextrec-0.3.2}/nextrec/models/match/__init__.py +0 -0
  102. {nextrec-0.2.7 → nextrec-0.3.2}/nextrec/models/match/dssm.py +0 -0
  103. {nextrec-0.2.7 → nextrec-0.3.2}/nextrec/models/match/dssm_v2.py +0 -0
  104. {nextrec-0.2.7 → nextrec-0.3.2}/nextrec/models/match/sdm.py +0 -0
  105. {nextrec-0.2.7 → nextrec-0.3.2}/nextrec/models/match/youtube_dnn.py +0 -0
  106. {nextrec-0.2.7 → nextrec-0.3.2}/nextrec/models/ranking/afm.py +0 -0
  107. {nextrec-0.2.7 → nextrec-0.3.2}/nextrec/models/ranking/autoint.py +0 -0
  108. {nextrec-0.2.7 → nextrec-0.3.2}/nextrec/models/ranking/din.py +0 -0
  109. {nextrec-0.2.7 → nextrec-0.3.2}/nextrec/models/ranking/fibinet.py +0 -0
  110. {nextrec-0.2.7 → nextrec-0.3.2}/nextrec/models/ranking/fm.py +0 -0
  111. {nextrec-0.2.7 → nextrec-0.3.2}/nextrec/models/ranking/pnn.py +0 -0
  112. {nextrec-0.2.7 → nextrec-0.3.2}/nextrec/utils/__init__.py +0 -0
  113. {nextrec-0.2.7 → nextrec-0.3.2}/nextrec/utils/embedding.py +0 -0
  114. {nextrec-0.2.7 → nextrec-0.3.2}/nextrec/utils/initializer.py +0 -0
  115. {nextrec-0.2.7 → nextrec-0.3.2}/pytest.ini +0 -0
  116. {nextrec-0.2.7 → nextrec-0.3.2}/requirements.txt +0 -0
  117. {nextrec-0.2.7 → nextrec-0.3.2}/test/__init__.py +0 -0
  118. {nextrec-0.2.7 → nextrec-0.3.2}/test/conftest.py +0 -0
  119. {nextrec-0.2.7 → nextrec-0.3.2}/test/run_tests.py +0 -0
  120. {nextrec-0.2.7 → nextrec-0.3.2}/test/test_losses.py +0 -0
  121. {nextrec-0.2.7 → nextrec-0.3.2}/test/test_match_models.py +0 -0
  122. {nextrec-0.2.7 → nextrec-0.3.2}/test/test_multitask_models.py +0 -0
  123. {nextrec-0.2.7 → nextrec-0.3.2}/test/test_preprocessor.py +0 -0
  124. {nextrec-0.2.7 → nextrec-0.3.2}/test/test_ranking_models.py +0 -0
  125. {nextrec-0.2.7 → nextrec-0.3.2}/test/test_utils.py +0 -0
  126. {nextrec-0.2.7 → nextrec-0.3.2}/test_requirements.txt +0 -0
nextrec-0.3.2/PKG-INFO ADDED
@@ -0,0 +1,312 @@
1
+ Metadata-Version: 2.4
2
+ Name: nextrec
3
+ Version: 0.3.2
4
+ Summary: A comprehensive recommendation library with match, ranking, and multi-task learning models
5
+ Project-URL: Homepage, https://github.com/zerolovesea/NextRec
6
+ Project-URL: Repository, https://github.com/zerolovesea/NextRec
7
+ Project-URL: Documentation, https://github.com/zerolovesea/NextRec/blob/main/README.md
8
+ Project-URL: Issues, https://github.com/zerolovesea/NextRec/issues
9
+ Author-email: zerolovesea <zyaztec@gmail.com>
10
+ License-File: LICENSE
11
+ Keywords: ctr,deep-learning,match,pytorch,ranking,recommendation
12
+ Classifier: Development Status :: 3 - Alpha
13
+ Classifier: Intended Audience :: Developers
14
+ Classifier: Intended Audience :: Science/Research
15
+ Classifier: License :: OSI Approved :: Apache Software License
16
+ Classifier: Programming Language :: Python :: 3
17
+ Classifier: Programming Language :: Python :: 3.10
18
+ Classifier: Programming Language :: Python :: 3.11
19
+ Classifier: Programming Language :: Python :: 3.12
20
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
21
+ Requires-Python: >=3.10
22
+ Requires-Dist: numpy<2.0,>=1.21; sys_platform == 'linux' and python_version < '3.12'
23
+ Requires-Dist: numpy<3.0,>=1.26; sys_platform == 'linux' and python_version >= '3.12'
24
+ Requires-Dist: numpy>=1.23.0; sys_platform == 'win32'
25
+ Requires-Dist: numpy>=1.24.0; sys_platform == 'darwin'
26
+ Requires-Dist: pandas<2.0,>=1.5; sys_platform == 'linux' and python_version < '3.12'
27
+ Requires-Dist: pandas<2.3.0,>=2.1.0; sys_platform == 'win32'
28
+ Requires-Dist: pandas>=2.0.0; sys_platform == 'darwin'
29
+ Requires-Dist: pandas>=2.1.0; sys_platform == 'linux' and python_version >= '3.12'
30
+ Requires-Dist: pyarrow<13.0.0,>=10.0.0; sys_platform == 'linux' and python_version < '3.12'
31
+ Requires-Dist: pyarrow<15.0.0,>=12.0.0; sys_platform == 'win32'
32
+ Requires-Dist: pyarrow>=12.0.0; sys_platform == 'darwin'
33
+ Requires-Dist: pyarrow>=16.0.0; sys_platform == 'linux' and python_version >= '3.12'
34
+ Requires-Dist: scikit-learn<2.0,>=1.2; sys_platform == 'linux' and python_version < '3.12'
35
+ Requires-Dist: scikit-learn>=1.3.0; sys_platform == 'darwin'
36
+ Requires-Dist: scikit-learn>=1.3.0; sys_platform == 'linux' and python_version >= '3.12'
37
+ Requires-Dist: scikit-learn>=1.3.0; sys_platform == 'win32'
38
+ Requires-Dist: scipy<1.12,>=1.8; sys_platform == 'linux' and python_version < '3.12'
39
+ Requires-Dist: scipy>=1.10.0; sys_platform == 'darwin'
40
+ Requires-Dist: scipy>=1.10.0; sys_platform == 'win32'
41
+ Requires-Dist: scipy>=1.11.0; sys_platform == 'linux' and python_version >= '3.12'
42
+ Requires-Dist: torch>=2.0.0
43
+ Requires-Dist: torchvision>=0.15.0
44
+ Requires-Dist: tqdm>=4.65.0
45
+ Provides-Extra: dev
46
+ Requires-Dist: jupyter>=1.0.0; extra == 'dev'
47
+ Requires-Dist: matplotlib>=3.7.0; extra == 'dev'
48
+ Requires-Dist: pytest-cov>=4.1.0; extra == 'dev'
49
+ Requires-Dist: pytest-html>=3.2.0; extra == 'dev'
50
+ Requires-Dist: pytest-mock>=3.11.0; extra == 'dev'
51
+ Requires-Dist: pytest-timeout>=2.1.0; extra == 'dev'
52
+ Requires-Dist: pytest-xdist>=3.3.0; extra == 'dev'
53
+ Requires-Dist: pytest>=7.4.0; extra == 'dev'
54
+ Requires-Dist: seaborn>=0.12.0; extra == 'dev'
55
+ Description-Content-Type: text/markdown
56
+
57
+ <p align="center">
58
+ <img align="center" src="asserts/logo.png" width="40%">
59
+ </p>
60
+
61
+ <div align="center">
62
+
63
+ ![Python](https://img.shields.io/badge/Python-3.10+-blue.svg)
64
+ ![PyTorch](https://img.shields.io/badge/PyTorch-2.0+-ee4c2c.svg)
65
+ ![License](https://img.shields.io/badge/License-Apache%202.0-green.svg)
66
+ ![Version](https://img.shields.io/badge/Version-0.3.2-orange.svg)
67
+
68
+ English | [中文文档](README_zh.md)
69
+
70
+ **A Unified, Efficient, and Scalable Recommendation System Framework**
71
+
72
+ </div>
73
+
74
+ ## Introduction
75
+
76
+ NextRec is a modern recommendation framework built on PyTorch, delivering a unified experience for modeling, training, and evaluation. It follows a modular design with rich model implementations, data-processing utilities, and engineering-ready training components. NextRec focuses on large-scale industrial recall scenarios on Spark clusters, training on massive offline parquet features.
77
+
78
+ ## Why NextRec
79
+
80
+ - **Unified feature engineering & data pipeline**: Dense/Sparse/Sequence feature definitions, persistent DataProcessor, and batch-optimized RecDataLoader, matching offline feature training/inference in industrial big-data settings.
81
+ - **Multi-scenario coverage**: Ranking (CTR/CVR), retrieval, multi-task learning, and more marketing/rec models, with a continuously expanding model zoo.
82
+ - **Developer-friendly experience**: Stream processing/training/inference for csv/parquet/pathlike data, plus GPU/MPS acceleration and visualization support.
83
+ - **Efficient training & evaluation**: Standardized engine with optimizers, LR schedulers, early stopping, checkpoints, and detailed logging out of the box.
84
+
85
+ ## Architecture
86
+
87
+ NextRec adopts a modular and low-coupling engineering design, enabling full-pipeline reusability and scalability across data processing → model construction → training & evaluation → inference & deployment. Its core components include: a Feature-Spec-driven Embedding architecture, the BaseModel abstraction, a set of independent reusable Layers, a unified DataLoader for both training and inference, and a ready-to-use Model Zoo.
88
+
89
+ ![NextRec Architecture](asserts/nextrec_diagram_en.png)
90
+
91
+ > The project borrows ideas from excellent open-source rec libraries. Early layers referenced [torch-rechub](https://github.com/datawhalechina/torch-rechub) but have been replaced with in-house implementations. torch-rechub remains mature in architecture and models; the author contributed a bit there—feel free to check it out.
92
+
93
+ ---
94
+
95
+ ## Installation
96
+
97
+ You can quickly install the latest NextRec via `pip install nextrec`; Python 3.10+ is required.
98
+
99
+ ## Tutorials
100
+
101
+ See `tutorials/` for examples covering ranking, retrieval, multi-task learning, and data processing:
102
+
103
+ - [movielen_ranking_deepfm.py](/tutorials/movielen_ranking_deepfm.py) — DeepFM training on MovieLens 100k
104
+ - [example_ranking_din.py](/tutorials/example_ranking_din.py) — DIN training on the e-commerce dataset
105
+ - [example_multitask.py](/tutorials/example_multitask.py) — ESMM multi-task training on the e-commerce dataset
106
+ - [movielen_match_dssm.py](/tutorials/movielen_match_dssm.py) — DSSM retrieval on MovieLens 100k
107
+
108
+ To dive deeper, Jupyter notebooks are available:
109
+
110
+ - [Hands on the NextRec framework](/tutorials/notebooks/en/Hands%20on%20nextrec.ipynb)
111
+ - [Using the data processor for preprocessing](/tutorials/notebooks/en/Hands%20on%20dataprocessor.ipynb)
112
+
113
+ > Current version [0.3.2]: the matching module is not fully polished yet and may have compatibility issues or unexpected errors. Please raise an issue if you run into problems.
114
+
115
+ ## 5-Minute Quick Start
116
+
117
+ We provide a detailed quick start and paired datasets to help you learn the framework. In `dataset/` you’ll find an e-commerce sample dataset like this:
118
+
119
+ | user_id | item_id | dense_0 | dense_1 | dense_2 | dense_3 | dense_4 | dense_5 | dense_6 | dense_7 | sparse_0 | sparse_1 | sparse_2 | sparse_3 | sparse_4 | sparse_5 | sparse_6 | sparse_7 | sparse_8 | sparse_9 | sequence_0 | sequence_1 | label |
120
+ |--------|---------|-------------|-------------|-------------|------------|-------------|-------------|-------------|-------------|----------|----------|----------|----------|----------|----------|----------|----------|----------|----------|-----------------------------------------------------------|-----------------------------------------------------------|-------|
121
+ | 1 | 7817 | 0.14704075 | 0.31020382 | 0.77780896 | 0.944897 | 0.62315375 | 0.57124174 | 0.77009535 | 0.3211029 | 315 | 260 | 379 | 146 | 168 | 161 | 138 | 88 | 5 | 312 | [170,175,97,338,105,353,272,546,175,545,463,128,0,0,0] | [368,414,820,405,548,63,327,0,0,0,0,0,0,0,0] | 0 |
122
+ | 1 | 3579 | 0.77811223 | 0.80359334 | 0.5185201 | 0.91091245 | 0.043562356 | 0.82142705 | 0.8803686 | 0.33748195 | 149 | 229 | 442 | 6 | 167 | 252 | 25 | 402 | 7 | 168 | [179,48,61,551,284,165,344,151,0,0,0,0,0,0,0] | [814,0,0,0,0,0,0,0,0,0,0,0,0,0,0] | 1 |
123
+
124
+ Below is a short example showing how to train a DIN model. DIN (Deep Interest Network) won Best Paper at KDD 2018 for CTR prediction. You can also run `python tutorials/example_ranking_din.py` directly.
125
+
126
+ After training, detailed logs are available under `nextrec_logs/din_tutorial`.
127
+
128
+ ```python
129
+ import pandas as pd
130
+
131
+ from nextrec.models.ranking.din import DIN
132
+ from nextrec.basic.features import DenseFeature, SparseFeature, SequenceFeature
133
+
134
+ df = pd.read_csv('dataset/ranking_task.csv')
135
+
136
+ for col in [c for c in df.columns if 'sequence' in c]: # csv loads lists as text; convert them back to objects
137
+ df[col] = df[col].apply(lambda x: eval(x) if isinstance(x, str) else x)
138
+
139
+ # Define feature columns
140
+ dense_features = [DenseFeature(name=f'dense_{i}', input_dim=1) for i in range(8)]
141
+
142
+ sparse_features = [SparseFeature(name='user_id', embedding_name='user_emb', vocab_size=int(df['user_id'].max() + 1), embedding_dim=32), SparseFeature(name='item_id', embedding_name='item_emb', vocab_size=int(df['item_id'].max() + 1), embedding_dim=32),]
143
+
144
+ sparse_features.extend([SparseFeature(name=f'sparse_{i}', embedding_name=f'sparse_{i}_emb', vocab_size=int(df[f'sparse_{i}'].max() + 1), embedding_dim=32) for i in range(10)])
145
+
146
+ sequence_features = [
147
+ SequenceFeature(name='sequence_0', vocab_size=int(df['sequence_0'].apply(lambda x: max(x)).max() + 1), embedding_dim=32, padding_idx=0, embedding_name='item_emb'),
148
+ SequenceFeature(name='sequence_1', vocab_size=int(df['sequence_1'].apply(lambda x: max(x)).max() + 1), embedding_dim=16, padding_idx=0, embedding_name='sparse_0_emb'),]
149
+
150
+ mlp_params = {
151
+ "dims": [256, 128, 64],
152
+ "activation": "relu",
153
+ "dropout": 0.3,
154
+ }
155
+
156
+ model = DIN(
157
+ dense_features=dense_features,
158
+ sparse_features=sparse_features,
159
+ sequence_features=sequence_features,
160
+ mlp_params=mlp_params,
161
+ attention_hidden_units=[80, 40],
162
+ attention_activation='sigmoid',
163
+ attention_use_softmax=True,
164
+ target=['label'], # target variable
165
+ device='mps',
166
+ embedding_l1_reg=1e-6,
167
+ embedding_l2_reg=1e-5,
168
+ dense_l1_reg=1e-5,
169
+ dense_l2_reg=1e-4,
170
+ session_id="din_tutorial", # experiment id for logs
171
+ )
172
+
173
+ # Compile model with optimizer and loss
174
+ model.compile(
175
+ optimizer = "adam",
176
+ optimizer_params = {"lr": 1e-3, "weight_decay": 1e-5},
177
+ loss = "focal",
178
+ loss_params={"gamma": 2.0, "alpha": 0.25},
179
+ )
180
+
181
+ model.fit(
182
+ train_data=df,
183
+ metrics=['auc', 'gauc', 'logloss'], # metrics to track
184
+ epochs=3,
185
+ batch_size=512,
186
+ shuffle=True,
187
+ user_id_column='user_id' # used for GAUC
188
+ )
189
+
190
+ # Evaluate after training
191
+ metrics = model.evaluate(
192
+ df,
193
+ metrics=['auc', 'gauc', 'logloss'],
194
+ batch_size=512,
195
+ user_id_column='user_id'
196
+ )
197
+ ```
198
+
199
+ ---
200
+
201
+ ## Supported Models
202
+
203
+ ### Ranking Models
204
+
205
+ | Model | Paper | Year | Status |
206
+ |-------|-------|------|--------|
207
+ | [FM](nextrec/models/ranking/fm.py) | Factorization Machines | ICDM 2010 | Supported |
208
+ | [AFM](nextrec/models/ranking/afm.py) | Attentional Factorization Machines: Learning the Weight of Feature Interactions via Attention Networks | IJCAI 2017 | Supported |
209
+ | [DeepFM](nextrec/models/ranking/deepfm.py) | DeepFM: A Factorization-Machine based Neural Network for CTR Prediction | IJCAI 2017 | Supported |
210
+ | [Wide&Deep](nextrec/models/ranking/widedeep.py) | Wide & Deep Learning for Recommender Systems | DLRS 2016 | Supported |
211
+ | [xDeepFM](nextrec/models/ranking/xdeepfm.py) | xDeepFM: Combining Explicit and Implicit Feature Interactions | KDD 2018 | Supported |
212
+ | [FiBiNET](nextrec/models/ranking/fibinet.py) | FiBiNET: Combining Feature Importance and Bilinear Feature Interaction for CTR Prediction | RecSys 2019 | Supported |
213
+ | [PNN](nextrec/models/ranking/pnn.py) | Product-based Neural Networks for User Response Prediction | ICDM 2016 | Supported |
214
+ | [AutoInt](nextrec/models/ranking/autoint.py) | AutoInt: Automatic Feature Interaction Learning | CIKM 2019 | Supported |
215
+ | [DCN](nextrec/models/ranking/dcn.py) | Deep & Cross Network for Ad Click Predictions | ADKDD 2017 | Supported |
216
+ | [DCN v2](nextrec/models/ranking/dcn_v2.py) | DCN V2: Improved Deep & Cross Network and Practical Lessons for Web-scale Learning to Rank Systems | KDD 2021 | In Progress |
217
+ | [DIN](nextrec/models/ranking/din.py) | Deep Interest Network for CTR Prediction | KDD 2018 | Supported |
218
+ | [DIEN](nextrec/models/ranking/dien.py) | Deep Interest Evolution Network | AAAI 2019 | Supported |
219
+ | [MaskNet](nextrec/models/ranking/masknet.py) | MaskNet: Feature-wise Gating Blocks for High-dimensional Sparse Recommendation Data | 2020 | Supported |
220
+
221
+ ### Retrieval Models
222
+
223
+ | Model | Paper | Year | Status |
224
+ |-------|-------|------|--------|
225
+ | [DSSM](nextrec/models/match/dssm.py) | Learning Deep Structured Semantic Models | CIKM 2013 | Supported |
226
+ | [DSSM v2](nextrec/models/match/dssm_v2.py) | DSSM with pairwise BPR-style optimization | - | Supported |
227
+ | [YouTube DNN](nextrec/models/match/youtube_dnn.py) | Deep Neural Networks for YouTube Recommendations | RecSys 2016 | Supported |
228
+ | [MIND](nextrec/models/match/mind.py) | Multi-Interest Network with Dynamic Routing | CIKM 2019 | Supported |
229
+ | [SDM](nextrec/models/match/sdm.py) | Sequential Deep Matching Model | - | Supported |
230
+
231
+ ### Multi-task Models
232
+
233
+ | Model | Paper | Year | Status |
234
+ |-------|-------|------|--------|
235
+ | [MMOE](nextrec/models/multi_task/mmoe.py) | Modeling Task Relationships in Multi-task Learning | KDD 2018 | Supported |
236
+ | [PLE](nextrec/models/multi_task/ple.py) | Progressive Layered Extraction | RecSys 2020 | Supported |
237
+ | [ESMM](nextrec/models/multi_task/esmm.py) | Entire Space Multi-task Model | SIGIR 2018 | Supported |
238
+ | [ShareBottom](nextrec/models/multi_task/share_bottom.py) | Multitask Learning | - | Supported |
239
+ | [POSO](nextrec/models/multi_task/poso.py) | POSO: Personalized Cold-start Modules for Large-scale Recommender Systems | 2021 | Supported |
240
+ | [POSO-IFLYTEK](nextrec/models/multi_task/poso_iflytek.py) | POSO with PLE-style gating for sequential marketing tasks | - | Supported |
241
+
242
+ ### Generative Models
243
+
244
+ | Model | Paper | Year | Status |
245
+ |-------|-------|------|--------|
246
+ | [TIGER](nextrec/models/generative/tiger.py) | Recommender Systems with Generative Retrieval | NeurIPS 2023 | In Progress |
247
+ | [HSTU](nextrec/models/generative/hstu.py) | Hierarchical Sequential Transduction Units | - | In Progress |
248
+
249
+ ---
250
+
251
+ ## Contributing
252
+
253
+ We welcome contributions of any form!
254
+
255
+ ### How to Contribute
256
+
257
+ 1. Fork the repository
258
+ 2. Create your feature branch (`git checkout -b feature/AmazingFeature`)
259
+ 3. Commit your changes (`git commit -m 'Add AmazingFeature'`)
260
+ 4. Push your branch (`git push origin feature/AmazingFeature`)
261
+ 5. Open a Pull Request
262
+
263
+ > Before submitting a PR, please run tests using `pytest test/ -v` or `python -m pytest` to ensure everything passes.
264
+
265
+ ### Code Style
266
+
267
+ - Follow PEP8
268
+ - Provide unit tests for new functionality
269
+ - Update documentation accordingly
270
+
271
+ ### Reporting Issues
272
+
273
+ When submitting issues on GitHub, please include:
274
+
275
+ - Description of the problem
276
+ - Reproduction steps
277
+ - Expected behavior
278
+ - Actual behavior
279
+ - Environment info (Python version, PyTorch version, etc.)
280
+
281
+ ---
282
+
283
+ ## License
284
+
285
+ This project is licensed under the [Apache 2.0 License](./LICENSE).
286
+
287
+ ---
288
+
289
+ ## Contact
290
+
291
+ - **GitHub Issues**: [Submit an issue](https://github.com/zerolovesea/NextRec/issues)
292
+ - **Email**: zyaztec@gmail.com
293
+
294
+ ---
295
+
296
+ ## Acknowledgements
297
+
298
+ NextRec is inspired by the following great open-source projects:
299
+
300
+ - [torch-rechub](https://github.com/datawhalechina/torch-rechub) — Flexible, easy-to-extend recommendation framework
301
+ - [FuxiCTR](https://github.com/reczoo/FuxiCTR) — Configurable, tunable, and reproducible CTR library
302
+ - [RecBole](https://github.com/RUCAIBox/RecBole) — Unified, comprehensive, and efficient recommendation library
303
+
304
+ Special thanks to all open-source contributors!
305
+
306
+ ---
307
+
308
+ <div align="center">
309
+
310
+ **[Back to Top](#nextrec)**
311
+
312
+ </div>
@@ -0,0 +1,256 @@
1
+ <p align="center">
2
+ <img align="center" src="asserts/logo.png" width="40%">
3
+ </p>
4
+
5
+ <div align="center">
6
+
7
+ ![Python](https://img.shields.io/badge/Python-3.10+-blue.svg)
8
+ ![PyTorch](https://img.shields.io/badge/PyTorch-1.10+-ee4c2c.svg)
9
+ ![License](https://img.shields.io/badge/License-Apache%202.0-green.svg)
10
+ ![Version](https://img.shields.io/badge/Version-0.3.2-orange.svg)
11
+
12
+ English | [中文文档](README_zh.md)
13
+
14
+ **A Unified, Efficient, and Scalable Recommendation System Framework**
15
+
16
+ </div>
17
+
18
+ ## Introduction
19
+
20
+ NextRec is a modern recommendation framework built on PyTorch, delivering a unified experience for modeling, training, and evaluation. It follows a modular design with rich model implementations, data-processing utilities, and engineering-ready training components. NextRec focuses on large-scale industrial recall scenarios on Spark clusters, training on massive offline parquet features.
21
+
22
+ ## Why NextRec
23
+
24
+ - **Unified feature engineering & data pipeline**: Dense/Sparse/Sequence feature definitions, persistent DataProcessor, and batch-optimized RecDataLoader, matching offline feature training/inference in industrial big-data settings.
25
+ - **Multi-scenario coverage**: Ranking (CTR/CVR), retrieval, multi-task learning, and more marketing/rec models, with a continuously expanding model zoo.
26
+ - **Developer-friendly experience**: Stream processing/training/inference for csv/parquet/pathlike data, plus GPU/MPS acceleration and visualization support.
27
+ - **Efficient training & evaluation**: Standardized engine with optimizers, LR schedulers, early stopping, checkpoints, and detailed logging out of the box.
28
+
29
+ ## Architecture
30
+
31
+ NextRec adopts a modular and low-coupling engineering design, enabling full-pipeline reusability and scalability across data processing → model construction → training & evaluation → inference & deployment. Its core components include: a Feature-Spec-driven Embedding architecture, the BaseModel abstraction, a set of independent reusable Layers, a unified DataLoader for both training and inference, and a ready-to-use Model Zoo.
32
+
33
+ ![NextRec Architecture](asserts/nextrec_diagram_en.png)
34
+
35
+ > The project borrows ideas from excellent open-source rec libraries. Early layers referenced [torch-rechub](https://github.com/datawhalechina/torch-rechub) but have been replaced with in-house implementations. torch-rechub remains mature in architecture and models; the author contributed a bit there—feel free to check it out.
36
+
37
+ ---
38
+
39
+ ## Installation
40
+
41
+ You can quickly install the latest NextRec via `pip install nextrec`; Python 3.10+ is required.
42
+
43
+ ## Tutorials
44
+
45
+ See `tutorials/` for examples covering ranking, retrieval, multi-task learning, and data processing:
46
+
47
+ - [movielen_ranking_deepfm.py](/tutorials/movielen_ranking_deepfm.py) — DeepFM training on MovieLens 100k
48
+ - [example_ranking_din.py](/tutorials/example_ranking_din.py) — DIN training on the e-commerce dataset
49
+ - [example_multitask.py](/tutorials/example_multitask.py) — ESMM multi-task training on the e-commerce dataset
50
+ - [example_match_dssm.py](/tutorials/example_match_dssm.py) — DSSM retrieval on MovieLens 100k
51
+
52
+ To dive deeper, Jupyter notebooks are available:
53
+
54
+ - [Hands on the NextRec framework](/tutorials/notebooks/en/Hands%20on%20nextrec.ipynb)
55
+ - [Using the data processor for preprocessing](/tutorials/notebooks/en/Hands%20on%20dataprocessor.ipynb)
56
+
57
+ > Current version [0.3.2]: the matching module is not fully polished yet and may have compatibility issues or unexpected errors. Please raise an issue if you run into problems.
58
+
59
+ ## 5-Minute Quick Start
60
+
61
+ We provide a detailed quick start and paired datasets to help you learn the framework. In `dataset/` you’ll find an e-commerce sample dataset like this:
62
+
63
+ | user_id | item_id | dense_0 | dense_1 | dense_2 | dense_3 | dense_4 | dense_5 | dense_6 | dense_7 | sparse_0 | sparse_1 | sparse_2 | sparse_3 | sparse_4 | sparse_5 | sparse_6 | sparse_7 | sparse_8 | sparse_9 | sequence_0 | sequence_1 | label |
64
+ |--------|---------|-------------|-------------|-------------|------------|-------------|-------------|-------------|-------------|----------|----------|----------|----------|----------|----------|----------|----------|----------|----------|-----------------------------------------------------------|-----------------------------------------------------------|-------|
65
+ | 1 | 7817 | 0.14704075 | 0.31020382 | 0.77780896 | 0.944897 | 0.62315375 | 0.57124174 | 0.77009535 | 0.3211029 | 315 | 260 | 379 | 146 | 168 | 161 | 138 | 88 | 5 | 312 | [170,175,97,338,105,353,272,546,175,545,463,128,0,0,0] | [368,414,820,405,548,63,327,0,0,0,0,0,0,0,0] | 0 |
66
+ | 1 | 3579 | 0.77811223 | 0.80359334 | 0.5185201 | 0.91091245 | 0.043562356 | 0.82142705 | 0.8803686 | 0.33748195 | 149 | 229 | 442 | 6 | 167 | 252 | 25 | 402 | 7 | 168 | [179,48,61,551,284,165,344,151,0,0,0,0,0,0,0] | [814,0,0,0,0,0,0,0,0,0,0,0,0,0,0] | 1 |
67
+
68
+ Below is a short example showing how to train a DIN model. DIN (Deep Interest Network) won Best Paper at KDD 2018 for CTR prediction. You can also run `python tutorials/example_ranking_din.py` directly.
69
+
70
+ After training, detailed logs are available under `nextrec_logs/din_tutorial`.
71
+
72
+ ```python
73
+ import pandas as pd
74
+
75
+ from nextrec.models.ranking.din import DIN
76
+ from nextrec.basic.features import DenseFeature, SparseFeature, SequenceFeature
77
+
78
+ df = pd.read_csv('dataset/ranking_task.csv')
79
+
80
+ for col in [c for c in df.columns if 'sequence' in c]: # csv loads lists as text; convert them back to objects
81
+ df[col] = df[col].apply(lambda x: eval(x) if isinstance(x, str) else x)
82
+
83
+ # Define feature columns
84
+ dense_features = [DenseFeature(name=f'dense_{i}', input_dim=1) for i in range(8)]
85
+
86
+ sparse_features = [SparseFeature(name='user_id', embedding_name='user_emb', vocab_size=int(df['user_id'].max() + 1), embedding_dim=32), SparseFeature(name='item_id', embedding_name='item_emb', vocab_size=int(df['item_id'].max() + 1), embedding_dim=32),]
87
+
88
+ sparse_features.extend([SparseFeature(name=f'sparse_{i}', embedding_name=f'sparse_{i}_emb', vocab_size=int(df[f'sparse_{i}'].max() + 1), embedding_dim=32) for i in range(10)])
89
+
90
+ sequence_features = [
91
+ SequenceFeature(name='sequence_0', vocab_size=int(df['sequence_0'].apply(lambda x: max(x)).max() + 1), embedding_dim=32, padding_idx=0, embedding_name='item_emb'),
92
+ SequenceFeature(name='sequence_1', vocab_size=int(df['sequence_1'].apply(lambda x: max(x)).max() + 1), embedding_dim=16, padding_idx=0, embedding_name='sparse_0_emb'),]
93
+
94
+ mlp_params = {
95
+ "dims": [256, 128, 64],
96
+ "activation": "relu",
97
+ "dropout": 0.3,
98
+ }
99
+
100
+ model = DIN(
101
+ dense_features=dense_features,
102
+ sparse_features=sparse_features,
103
+ sequence_features=sequence_features,
104
+ mlp_params=mlp_params,
105
+ attention_hidden_units=[80, 40],
106
+ attention_activation='sigmoid',
107
+ attention_use_softmax=True,
108
+ target=['label'], # target variable
109
+ device='mps',
110
+ embedding_l1_reg=1e-6,
111
+ embedding_l2_reg=1e-5,
112
+ dense_l1_reg=1e-5,
113
+ dense_l2_reg=1e-4,
114
+ session_id="din_tutorial", # experiment id for logs
115
+ )
116
+
117
+ # Compile model with optimizer and loss
118
+ model.compile(
119
+ optimizer = "adam",
120
+ optimizer_params = {"lr": 1e-3, "weight_decay": 1e-5},
121
+ loss = "focal",
122
+ loss_params={"gamma": 2.0, "alpha": 0.25},
123
+ )
124
+
125
+ model.fit(
126
+ train_data=df,
127
+ metrics=['auc', 'gauc', 'logloss'], # metrics to track
128
+ epochs=3,
129
+ batch_size=512,
130
+ shuffle=True,
131
+ user_id_column='user_id' # used for GAUC
132
+ )
133
+
134
+ # Evaluate after training
135
+ metrics = model.evaluate(
136
+ df,
137
+ metrics=['auc', 'gauc', 'logloss'],
138
+ batch_size=512,
139
+ user_id_column='user_id'
140
+ )
141
+ ```
142
+
143
+ ---
144
+
145
+ ## Supported Models
146
+
147
+ ### Ranking Models
148
+
149
+ | Model | Paper | Year | Status |
150
+ |-------|-------|------|--------|
151
+ | [FM](nextrec/models/ranking/fm.py) | Factorization Machines | ICDM 2010 | Supported |
152
+ | [AFM](nextrec/models/ranking/afm.py) | Attentional Factorization Machines: Learning the Weight of Feature Interactions via Attention Networks | IJCAI 2017 | Supported |
153
+ | [DeepFM](nextrec/models/ranking/deepfm.py) | DeepFM: A Factorization-Machine based Neural Network for CTR Prediction | IJCAI 2017 | Supported |
154
+ | [Wide&Deep](nextrec/models/ranking/widedeep.py) | Wide & Deep Learning for Recommender Systems | DLRS 2016 | Supported |
155
+ | [xDeepFM](nextrec/models/ranking/xdeepfm.py) | xDeepFM: Combining Explicit and Implicit Feature Interactions | KDD 2018 | Supported |
156
+ | [FiBiNET](nextrec/models/ranking/fibinet.py) | FiBiNET: Combining Feature Importance and Bilinear Feature Interaction for CTR Prediction | RecSys 2019 | Supported |
157
+ | [PNN](nextrec/models/ranking/pnn.py) | Product-based Neural Networks for User Response Prediction | ICDM 2016 | Supported |
158
+ | [AutoInt](nextrec/models/ranking/autoint.py) | AutoInt: Automatic Feature Interaction Learning | CIKM 2019 | Supported |
159
+ | [DCN](nextrec/models/ranking/dcn.py) | Deep & Cross Network for Ad Click Predictions | ADKDD 2017 | Supported |
160
+ | [DCN v2](nextrec/models/ranking/dcn_v2.py) | DCN V2: Improved Deep & Cross Network and Practical Lessons for Web-scale Learning to Rank Systems | KDD 2021 | In Progress |
161
+ | [DIN](nextrec/models/ranking/din.py) | Deep Interest Network for CTR Prediction | KDD 2018 | Supported |
162
+ | [DIEN](nextrec/models/ranking/dien.py) | Deep Interest Evolution Network | AAAI 2019 | Supported |
163
+ | [MaskNet](nextrec/models/ranking/masknet.py) | MaskNet: Feature-wise Gating Blocks for High-dimensional Sparse Recommendation Data | 2020 | Supported |
164
+
165
+ ### Retrieval Models
166
+
167
+ | Model | Paper | Year | Status |
168
+ |-------|-------|------|--------|
169
+ | [DSSM](nextrec/models/match/dssm.py) | Learning Deep Structured Semantic Models | CIKM 2013 | Supported |
170
+ | [DSSM v2](nextrec/models/match/dssm_v2.py) | DSSM with pairwise BPR-style optimization | - | Supported |
171
+ | [YouTube DNN](nextrec/models/match/youtube_dnn.py) | Deep Neural Networks for YouTube Recommendations | RecSys 2016 | Supported |
172
+ | [MIND](nextrec/models/match/mind.py) | Multi-Interest Network with Dynamic Routing | CIKM 2019 | Supported |
173
+ | [SDM](nextrec/models/match/sdm.py) | Sequential Deep Matching Model | - | Supported |
174
+
175
+ ### Multi-task Models
176
+
177
+ | Model | Paper | Year | Status |
178
+ |-------|-------|------|--------|
179
+ | [MMOE](nextrec/models/multi_task/mmoe.py) | Modeling Task Relationships in Multi-task Learning | KDD 2018 | Supported |
180
+ | [PLE](nextrec/models/multi_task/ple.py) | Progressive Layered Extraction | RecSys 2020 | Supported |
181
+ | [ESMM](nextrec/models/multi_task/esmm.py) | Entire Space Multi-task Model | SIGIR 2018 | Supported |
182
+ | [ShareBottom](nextrec/models/multi_task/share_bottom.py) | Multitask Learning | - | Supported |
183
+ | [POSO](nextrec/models/multi_task/poso.py) | POSO: Personalized Cold-start Modules for Large-scale Recommender Systems | 2021 | Supported |
184
+ | [POSO-IFLYTEK](nextrec/models/multi_task/poso_iflytek.py) | POSO with PLE-style gating for sequential marketing tasks | - | Supported |
185
+
186
+ ### Generative Models
187
+
188
+ | Model | Paper | Year | Status |
189
+ |-------|-------|------|--------|
190
+ | [TIGER](nextrec/models/generative/tiger.py) | Recommender Systems with Generative Retrieval | NeurIPS 2023 | In Progress |
191
+ | [HSTU](nextrec/models/generative/hstu.py) | Hierarchical Sequential Transduction Units | - | In Progress |
192
+
193
+ ---
194
+
195
+ ## Contributing
196
+
197
+ We welcome contributions of any form!
198
+
199
+ ### How to Contribute
200
+
201
+ 1. Fork the repository
202
+ 2. Create your feature branch (`git checkout -b feature/AmazingFeature`)
203
+ 3. Commit your changes (`git commit -m 'Add AmazingFeature'`)
204
+ 4. Push your branch (`git push origin feature/AmazingFeature`)
205
+ 5. Open a Pull Request
206
+
207
+ > Before submitting a PR, please run tests using `pytest test/ -v` or `python -m pytest` to ensure everything passes.
208
+
209
+ ### Code Style
210
+
211
+ - Follow PEP8
212
+ - Provide unit tests for new functionality
213
+ - Update documentation accordingly
214
+
215
+ ### Reporting Issues
216
+
217
+ When submitting issues on GitHub, please include:
218
+
219
+ - Description of the problem
220
+ - Reproduction steps
221
+ - Expected behavior
222
+ - Actual behavior
223
+ - Environment info (Python version, PyTorch version, etc.)
224
+
225
+ ---
226
+
227
+ ## License
228
+
229
+ This project is licensed under the [Apache 2.0 License](./LICENSE).
230
+
231
+ ---
232
+
233
+ ## Contact
234
+
235
+ - **GitHub Issues**: [Submit an issue](https://github.com/zerolovesea/NextRec/issues)
236
+ - **Email**: zyaztec@gmail.com
237
+
238
+ ---
239
+
240
+ ## Acknowledgements
241
+
242
+ NextRec is inspired by the following great open-source projects:
243
+
244
+ - [torch-rechub](https://github.com/datawhalechina/torch-rechub) — Flexible, easy-to-extend recommendation framework
245
+ - [FuxiCTR](https://github.com/reczoo/FuxiCTR) — Configurable, tunable, and reproducible CTR library
246
+ - [RecBole](https://github.com/RUCAIBox/RecBole) — Unified, comprehensive, and efficient recommendation library
247
+
248
+ Special thanks to all open-source contributors!
249
+
250
+ ---
251
+
252
+ <div align="center">
253
+
254
+ **[Back to Top](#nextrec)**
255
+
256
+ </div>