nextrec 0.4.30.tar.gz → 0.4.32.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (195)
  1. {nextrec-0.4.30 → nextrec-0.4.32}/LICENSE +1 -1
  2. {nextrec-0.4.30 → nextrec-0.4.32}/PKG-INFO +5 -5
  3. {nextrec-0.4.30 → nextrec-0.4.32}/README.md +4 -4
  4. {nextrec-0.4.30 → nextrec-0.4.32}/README_en.md +4 -4
  5. {nextrec-0.4.30 → nextrec-0.4.32}/docs/rtd/conf.py +2 -2
  6. nextrec-0.4.32/nextrec/__version__.py +1 -0
  7. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/basic/model.py +48 -4
  8. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/cli.py +18 -10
  9. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/data/batch_utils.py +2 -2
  10. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/data/preprocessor.py +53 -1
  11. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/models/multi_task/[pre]aitm.py +3 -3
  12. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/models/multi_task/[pre]snr_trans.py +3 -3
  13. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/models/multi_task/[pre]star.py +3 -3
  14. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/models/multi_task/apg.py +3 -3
  15. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/models/multi_task/cross_stitch.py +3 -3
  16. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/models/multi_task/escm.py +3 -3
  17. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/models/multi_task/esmm.py +3 -3
  18. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/models/multi_task/hmoe.py +3 -3
  19. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/models/multi_task/mmoe.py +3 -3
  20. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/models/multi_task/pepnet.py +4 -4
  21. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/models/multi_task/ple.py +3 -3
  22. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/models/multi_task/poso.py +3 -3
  23. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/models/multi_task/share_bottom.py +3 -3
  24. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/models/ranking/afm.py +3 -2
  25. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/models/ranking/autoint.py +3 -2
  26. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/models/ranking/dcn.py +3 -2
  27. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/models/ranking/dcn_v2.py +3 -2
  28. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/models/ranking/deepfm.py +3 -2
  29. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/models/ranking/dien.py +3 -2
  30. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/models/ranking/din.py +3 -2
  31. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/models/ranking/eulernet.py +3 -2
  32. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/models/ranking/ffm.py +3 -2
  33. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/models/ranking/fibinet.py +3 -2
  34. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/models/ranking/fm.py +3 -2
  35. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/models/ranking/lr.py +3 -2
  36. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/models/ranking/masknet.py +3 -2
  37. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/models/ranking/pnn.py +3 -2
  38. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/models/ranking/widedeep.py +3 -2
  39. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/models/ranking/xdeepfm.py +3 -2
  40. nextrec-0.4.32/nextrec/models/tree_base/__init__.py +15 -0
  41. nextrec-0.4.32/nextrec/models/tree_base/base.py +693 -0
  42. nextrec-0.4.32/nextrec/models/tree_base/catboost.py +97 -0
  43. nextrec-0.4.32/nextrec/models/tree_base/lightgbm.py +69 -0
  44. nextrec-0.4.32/nextrec/models/tree_base/xgboost.py +61 -0
  45. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/utils/config.py +1 -0
  46. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/utils/types.py +2 -0
  47. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec_cli_preset/NextRec-CLI.md +8 -0
  48. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec_cli_preset/NextRec-CLI_zh.md +8 -0
  49. nextrec-0.4.32/nextrec_cli_preset/model_configs/fibinet.yaml +16 -0
  50. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec_cli_preset/train_config.yaml +6 -0
  51. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec_cli_preset/train_config_template.yaml +6 -0
  52. {nextrec-0.4.30 → nextrec-0.4.32}/pyproject.toml +1 -1
  53. {nextrec-0.4.30 → nextrec-0.4.32}/tutorials/example_multitask.py +4 -4
  54. {nextrec-0.4.30 → nextrec-0.4.32}/tutorials/example_ranking_din.py +7 -6
  55. nextrec-0.4.32/tutorials/example_tree.py +97 -0
  56. {nextrec-0.4.30 → nextrec-0.4.32}/tutorials/movielen_match_dssm.py +1 -1
  57. nextrec-0.4.30/nextrec/__version__.py +0 -1
  58. nextrec-0.4.30/nextrec_cli_preset/model_configs/fibinet.yaml +0 -14
  59. {nextrec-0.4.30 → nextrec-0.4.32}/.github/workflows/publish.yml +0 -0
  60. {nextrec-0.4.30 → nextrec-0.4.32}/.github/workflows/tests.yml +0 -0
  61. {nextrec-0.4.30 → nextrec-0.4.32}/.gitignore +0 -0
  62. {nextrec-0.4.30 → nextrec-0.4.32}/.readthedocs.yaml +0 -0
  63. {nextrec-0.4.30 → nextrec-0.4.32}/CODE_OF_CONDUCT.md +0 -0
  64. {nextrec-0.4.30 → nextrec-0.4.32}/CONTRIBUTING.md +0 -0
  65. {nextrec-0.4.30 → nextrec-0.4.32}/MANIFEST.in +0 -0
  66. {nextrec-0.4.30 → nextrec-0.4.32}/assets/Feature Configuration.png +0 -0
  67. {nextrec-0.4.30 → nextrec-0.4.32}/assets/Model Parameters.png +0 -0
  68. {nextrec-0.4.30 → nextrec-0.4.32}/assets/Training Configuration.png +0 -0
  69. {nextrec-0.4.30 → nextrec-0.4.32}/assets/Training logs.png +0 -0
  70. {nextrec-0.4.30 → nextrec-0.4.32}/assets/logo.png +0 -0
  71. {nextrec-0.4.30 → nextrec-0.4.32}/assets/mmoe_tutorial.png +0 -0
  72. {nextrec-0.4.30 → nextrec-0.4.32}/assets/nextrec_diagram.png +0 -0
  73. {nextrec-0.4.30 → nextrec-0.4.32}/assets/test data.png +0 -0
  74. {nextrec-0.4.30 → nextrec-0.4.32}/dataset/ctcvr_task.csv +0 -0
  75. {nextrec-0.4.30 → nextrec-0.4.32}/dataset/ecommerce_task.csv +0 -0
  76. {nextrec-0.4.30 → nextrec-0.4.32}/dataset/match_task.csv +0 -0
  77. {nextrec-0.4.30 → nextrec-0.4.32}/dataset/movielens_100k.csv +0 -0
  78. {nextrec-0.4.30 → nextrec-0.4.32}/dataset/multitask_task.csv +0 -0
  79. {nextrec-0.4.30 → nextrec-0.4.32}/dataset/ranking_task.csv +0 -0
  80. {nextrec-0.4.30 → nextrec-0.4.32}/docs/en/Getting started guide.md +0 -0
  81. {nextrec-0.4.30 → nextrec-0.4.32}/docs/rtd/Makefile +0 -0
  82. {nextrec-0.4.30 → nextrec-0.4.32}/docs/rtd/index.md +0 -0
  83. {nextrec-0.4.30 → nextrec-0.4.32}/docs/rtd/make.bat +0 -0
  84. {nextrec-0.4.30 → nextrec-0.4.32}/docs/rtd/modules.rst +0 -0
  85. {nextrec-0.4.30 → nextrec-0.4.32}/docs/rtd/nextrec.basic.rst +0 -0
  86. {nextrec-0.4.30 → nextrec-0.4.32}/docs/rtd/nextrec.data.rst +0 -0
  87. {nextrec-0.4.30 → nextrec-0.4.32}/docs/rtd/nextrec.loss.rst +0 -0
  88. {nextrec-0.4.30 → nextrec-0.4.32}/docs/rtd/nextrec.rst +0 -0
  89. {nextrec-0.4.30 → nextrec-0.4.32}/docs/rtd/nextrec.utils.rst +0 -0
  90. {nextrec-0.4.30 → nextrec-0.4.32}/docs/rtd/requirements.txt +0 -0
  91. {nextrec-0.4.30 → nextrec-0.4.32}/docs/zh/快速上手.md +0 -0
  92. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/__init__.py +0 -0
  93. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/basic/__init__.py +0 -0
  94. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/basic/activation.py +0 -0
  95. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/basic/asserts.py +0 -0
  96. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/basic/callback.py +0 -0
  97. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/basic/features.py +0 -0
  98. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/basic/heads.py +0 -0
  99. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/basic/layers.py +0 -0
  100. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/basic/loggers.py +0 -0
  101. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/basic/metrics.py +0 -0
  102. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/basic/session.py +0 -0
  103. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/basic/summary.py +0 -0
  104. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/data/__init__.py +0 -0
  105. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/data/data_processing.py +0 -0
  106. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/data/data_utils.py +0 -0
  107. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/data/dataloader.py +0 -0
  108. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/loss/__init__.py +0 -0
  109. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/loss/grad_norm.py +0 -0
  110. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/loss/listwise.py +0 -0
  111. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/loss/pairwise.py +0 -0
  112. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/loss/pointwise.py +0 -0
  113. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/models/generative/__init__.py +0 -0
  114. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/models/generative/tiger.py +0 -0
  115. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/models/multi_task/__init__.py +0 -0
  116. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/models/ranking/__init__.py +0 -0
  117. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/models/representation/__init__.py +0 -0
  118. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/models/representation/autorec.py +0 -0
  119. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/models/representation/bpr.py +0 -0
  120. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/models/representation/cl4srec.py +0 -0
  121. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/models/representation/lightgcn.py +0 -0
  122. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/models/representation/mf.py +0 -0
  123. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/models/representation/rqvae.py +0 -0
  124. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/models/representation/s3rec.py +0 -0
  125. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/models/retrieval/__init__.py +0 -0
  126. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/models/retrieval/dssm.py +0 -0
  127. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/models/retrieval/dssm_v2.py +0 -0
  128. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/models/retrieval/mind.py +0 -0
  129. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/models/retrieval/sdm.py +0 -0
  130. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/models/retrieval/youtube_dnn.py +0 -0
  131. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/models/sequential/hstu.py +0 -0
  132. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/models/sequential/sasrec.py +0 -0
  133. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/utils/__init__.py +0 -0
  134. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/utils/console.py +0 -0
  135. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/utils/data.py +0 -0
  136. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/utils/embedding.py +0 -0
  137. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/utils/feature.py +0 -0
  138. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/utils/loss.py +0 -0
  139. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/utils/model.py +0 -0
  140. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec/utils/torch_utils.py +0 -0
  141. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec_cli_preset/feature_config.yaml +0 -0
  142. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec_cli_preset/model_configs/afm.yaml +0 -0
  143. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec_cli_preset/model_configs/apg.yaml +0 -0
  144. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec_cli_preset/model_configs/autoint.yaml +0 -0
  145. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec_cli_preset/model_configs/cross_stitch.yaml +0 -0
  146. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec_cli_preset/model_configs/dcn.yaml +0 -0
  147. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec_cli_preset/model_configs/deepfm.yaml +0 -0
  148. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec_cli_preset/model_configs/din.yaml +0 -0
  149. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec_cli_preset/model_configs/escm.yaml +0 -0
  150. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec_cli_preset/model_configs/esmm.yaml +0 -0
  151. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec_cli_preset/model_configs/fm.yaml +0 -0
  152. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec_cli_preset/model_configs/hmoe.yaml +0 -0
  153. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec_cli_preset/model_configs/masknet.yaml +0 -0
  154. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec_cli_preset/model_configs/mmoe.yaml +0 -0
  155. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec_cli_preset/model_configs/pepnet.yaml +0 -0
  156. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec_cli_preset/model_configs/ple.yaml +0 -0
  157. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec_cli_preset/model_configs/pnn.yaml +0 -0
  158. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec_cli_preset/model_configs/poso.yaml +0 -0
  159. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec_cli_preset/model_configs/share_bottom.yaml +0 -0
  160. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec_cli_preset/model_configs/widedeep.yaml +0 -0
  161. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec_cli_preset/model_configs/xdeepfm.yaml +0 -0
  162. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec_cli_preset/predict_config.yaml +0 -0
  163. {nextrec-0.4.30 → nextrec-0.4.32}/nextrec_cli_preset/predict_config_template.yaml +0 -0
  164. {nextrec-0.4.30 → nextrec-0.4.32}/pytest.ini +0 -0
  165. {nextrec-0.4.30 → nextrec-0.4.32}/requirements.txt +0 -0
  166. {nextrec-0.4.30 → nextrec-0.4.32}/scripts/format_code.py +0 -0
  167. {nextrec-0.4.30 → nextrec-0.4.32}/test/__init__.py +0 -0
  168. {nextrec-0.4.30 → nextrec-0.4.32}/test/conftest.py +0 -0
  169. {nextrec-0.4.30 → nextrec-0.4.32}/test/helpers.py +0 -0
  170. {nextrec-0.4.30 → nextrec-0.4.32}/test/run_tests.py +0 -0
  171. {nextrec-0.4.30 → nextrec-0.4.32}/test/test_base_model_regularization.py +0 -0
  172. {nextrec-0.4.30 → nextrec-0.4.32}/test/test_generative_models.py +0 -0
  173. {nextrec-0.4.30 → nextrec-0.4.32}/test/test_layers.py +0 -0
  174. {nextrec-0.4.30 → nextrec-0.4.32}/test/test_losses.py +0 -0
  175. {nextrec-0.4.30 → nextrec-0.4.32}/test/test_match_models.py +0 -0
  176. {nextrec-0.4.30 → nextrec-0.4.32}/test/test_multitask_models.py +0 -0
  177. {nextrec-0.4.30 → nextrec-0.4.32}/test/test_preprocessor.py +0 -0
  178. {nextrec-0.4.30 → nextrec-0.4.32}/test/test_ranking_models.py +0 -0
  179. {nextrec-0.4.30 → nextrec-0.4.32}/test/test_utils_console.py +0 -0
  180. {nextrec-0.4.30 → nextrec-0.4.32}/test/test_utils_data.py +0 -0
  181. {nextrec-0.4.30 → nextrec-0.4.32}/test/test_utils_embedding.py +0 -0
  182. {nextrec-0.4.30 → nextrec-0.4.32}/test_requirements.txt +0 -0
  183. {nextrec-0.4.30 → nextrec-0.4.32}/tutorials/distributed/example_distributed_training.py +0 -0
  184. {nextrec-0.4.30 → nextrec-0.4.32}/tutorials/distributed/example_distributed_training_large_dataset.py +0 -0
  185. {nextrec-0.4.30 → nextrec-0.4.32}/tutorials/example_match.py +0 -0
  186. {nextrec-0.4.30 → nextrec-0.4.32}/tutorials/movielen_ranking_deepfm.py +0 -0
  187. {nextrec-0.4.30 → nextrec-0.4.32}/tutorials/notebooks/en/Build semantic ID with RQ-VAE.ipynb +0 -0
  188. {nextrec-0.4.30 → nextrec-0.4.32}/tutorials/notebooks/en/Hands on dataprocessor.ipynb +0 -0
  189. {nextrec-0.4.30 → nextrec-0.4.32}/tutorials/notebooks/en/Hands on nextrec.ipynb +0 -0
  190. {nextrec-0.4.30 → nextrec-0.4.32}/tutorials/notebooks/zh/使用RQ-VAE构建语义ID.ipynb +0 -0
  191. {nextrec-0.4.30 → nextrec-0.4.32}/tutorials/notebooks/zh/如何使用DataProcessor进行预处理.ipynb +0 -0
  192. {nextrec-0.4.30 → nextrec-0.4.32}/tutorials/notebooks/zh/快速入门nextrec.ipynb +0 -0
  193. {nextrec-0.4.30 → nextrec-0.4.32}/tutorials/run_all_match_models.py +0 -0
  194. {nextrec-0.4.30 → nextrec-0.4.32}/tutorials/run_all_multitask_models.py +0 -0
  195. {nextrec-0.4.30 → nextrec-0.4.32}/tutorials/run_all_ranking_models.py +0 -0
@@ -1,6 +1,6 @@
 MIT License
 
-Copyright (c) 2025 NextRec
+Copyright (c) 2026 NextRec
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: nextrec
-Version: 0.4.30
+Version: 0.4.32
 Summary: A comprehensive recommendation library with match, ranking, and multi-task learning models
 Project-URL: Homepage, https://github.com/zerolovesea/NextRec
 Project-URL: Repository, https://github.com/zerolovesea/NextRec
@@ -69,7 +69,7 @@ Description-Content-Type: text/markdown
 ![Python](https://img.shields.io/badge/Python-3.10+-blue.svg)
 ![PyTorch](https://img.shields.io/badge/PyTorch-1.10+-ee4c2c.svg)
 ![License](https://img.shields.io/badge/License-Apache%202.0-green.svg)
-![Version](https://img.shields.io/badge/Version-0.4.30-orange.svg)
+![Version](https://img.shields.io/badge/Version-0.4.32-orange.svg)
 [![Ask DeepWiki](https://deepwiki.com/badge.svg)](https://deepwiki.com/zerolovesea/NextRec)
 
 中文文档 | [English Version](README_en.md)
@@ -254,11 +254,11 @@ nextrec --mode=predict --predict_config=path/to/predict_config.yaml
 
 预测结果固定保存到 `{checkpoint_path}/predictions/{name}.{save_data_format}`。
 
-> 截止当前版本0.4.30,NextRec CLI支持单机训练,分布式训练相关功能尚在开发中。
+> 截止当前版本0.4.32,NextRec CLI支持单机训练,分布式训练相关功能尚在开发中。
 
 ## 兼容平台
 
-当前最新版本为0.4.30,所有模型和测试代码均已在以下平台通过验证,如果开发者在使用中遇到兼容问题,请在issue区提出错误报告及系统版本:
+当前最新版本为0.4.32,所有模型和测试代码均已在以下平台通过验证,如果开发者在使用中遇到兼容问题,请在issue区提出错误报告及系统版本:
 
 | 平台 | 配置 |
 |------|------|
@@ -400,7 +400,7 @@ NextRec 的开发受到以下优秀项目的启发:
 @misc{nextrec,
 title = {NextRec},
 author = {Yang Zhou},
-year = {2025},
+year = {2026},
 publisher = {GitHub},
 journal = {GitHub repository},
 howpublished = {\url{https://github.com/zerolovesea/NextRec}},
@@ -8,7 +8,7 @@
 ![Python](https://img.shields.io/badge/Python-3.10+-blue.svg)
 ![PyTorch](https://img.shields.io/badge/PyTorch-1.10+-ee4c2c.svg)
 ![License](https://img.shields.io/badge/License-Apache%202.0-green.svg)
-![Version](https://img.shields.io/badge/Version-0.4.30-orange.svg)
+![Version](https://img.shields.io/badge/Version-0.4.32-orange.svg)
 [![Ask DeepWiki](https://deepwiki.com/badge.svg)](https://deepwiki.com/zerolovesea/NextRec)
 
 中文文档 | [English Version](README_en.md)
@@ -193,11 +193,11 @@ nextrec --mode=predict --predict_config=path/to/predict_config.yaml
 
 预测结果固定保存到 `{checkpoint_path}/predictions/{name}.{save_data_format}`。
 
-> 截止当前版本0.4.30,NextRec CLI支持单机训练,分布式训练相关功能尚在开发中。
+> 截止当前版本0.4.32,NextRec CLI支持单机训练,分布式训练相关功能尚在开发中。
 
 ## 兼容平台
 
-当前最新版本为0.4.30,所有模型和测试代码均已在以下平台通过验证,如果开发者在使用中遇到兼容问题,请在issue区提出错误报告及系统版本:
+当前最新版本为0.4.32,所有模型和测试代码均已在以下平台通过验证,如果开发者在使用中遇到兼容问题,请在issue区提出错误报告及系统版本:
 
 | 平台 | 配置 |
 |------|------|
@@ -339,7 +339,7 @@ NextRec 的开发受到以下优秀项目的启发:
 @misc{nextrec,
 title = {NextRec},
 author = {Yang Zhou},
-year = {2025},
+year = {2026},
 publisher = {GitHub},
 journal = {GitHub repository},
 howpublished = {\url{https://github.com/zerolovesea/NextRec}},
@@ -8,7 +8,7 @@
 ![Python](https://img.shields.io/badge/Python-3.10+-blue.svg)
 ![PyTorch](https://img.shields.io/badge/PyTorch-1.10+-ee4c2c.svg)
 ![License](https://img.shields.io/badge/License-Apache%202.0-green.svg)
-![Version](https://img.shields.io/badge/Version-0.4.30-orange.svg)
+![Version](https://img.shields.io/badge/Version-0.4.32-orange.svg)
 [![Ask DeepWiki](https://deepwiki.com/badge.svg)](https://deepwiki.com/zerolovesea/NextRec)
 
 English | [中文文档](README.md)
@@ -196,11 +196,11 @@ nextrec --mode=predict --predict_config=path/to/predict_config.yaml
 
 Prediction outputs are saved under `{checkpoint_path}/predictions/{name}.{save_data_format}`.
 
-> As of version 0.4.30, NextRec CLI supports single-machine training; distributed training features are currently under development.
+> As of version 0.4.32, NextRec CLI supports single-machine training; distributed training features are currently under development.
 
 ## Platform Compatibility
 
-The current version is 0.4.30. All models and test code have been validated on the following platforms. If you encounter compatibility issues, please report them in the issue tracker with your system version:
+The current version is 0.4.32. All models and test code have been validated on the following platforms. If you encounter compatibility issues, please report them in the issue tracker with your system version:
 
 | Platform | Configuration |
 |----------|---------------|
@@ -343,7 +343,7 @@ If you use this framework in your research or work, please consider citing:
 @misc{nextrec,
 title = {NextRec},
 author = {Yang Zhou},
-year = {2025},
+year = {2026},
 publisher = {GitHub},
 journal = {GitHub repository},
 howpublished = {\url{https://github.com/zerolovesea/NextRec}},
@@ -9,9 +9,9 @@ PROJECT_ROOT = Path(__file__).resolve().parents[2]
 sys.path.insert(0, str(PROJECT_ROOT / "nextrec"))
 
 project = "NextRec"
-copyright = "2025, Yang Zhou"
+copyright = "2026, Yang Zhou"
 author = "Yang Zhou"
-release = "0.4.30"
+release = "0.4.32"
 
 extensions = [
 "myst_parser",
@@ -0,0 +1 @@
+__version__ = "0.4.32"
@@ -13,7 +13,7 @@ import sys
 import pickle
 import socket
 from pathlib import Path
-from typing import Any, Literal
+from typing import Any, Literal, cast, overload
 
 import numpy as np
 import pandas as pd
@@ -97,6 +97,7 @@ from nextrec.utils.types import (
 SchedulerName,
 TrainingModeName,
 TaskTypeName,
+TaskTypeInput,
 MetricsName,
 )
 
@@ -119,7 +120,7 @@ class BaseModel(SummarySet, FeatureSet, nn.Module):
 sequence_features: list[SequenceFeature] | None = None,
 target: list[str] | str | None = None,
 id_columns: list[str] | str | None = None,
-task: TaskTypeName | list[TaskTypeName] | None = None,
+task: TaskTypeInput | list[TaskTypeInput] | None = None,
 training_mode: TrainingModeName | list[TrainingModeName] | None = None,
 embedding_l1_reg: float = 0.0,
 dense_l1_reg: float = 0.0,
@@ -193,7 +194,7 @@ class BaseModel(SummarySet, FeatureSet, nn.Module):
 dense_features, sparse_features, sequence_features, target, id_columns
 )
 
-self.task = task or self.default_task
+self.task = cast(TaskTypeName | list[TaskTypeName], task or self.default_task)
 self.nums_task = len(self.task) if isinstance(self.task, list) else 1
 
 training_mode = training_mode or "pointwise"
@@ -1623,6 +1624,49 @@ class BaseModel(SummarySet, FeatureSet, nn.Module):
 )
 return metrics_dict
 
+@overload
+def predict(
+self,
+data: str | dict | pd.DataFrame | DataLoader,
+batch_size: int = 32,
+save_path: str | os.PathLike | None = None,
+save_format: str = "csv",
+include_ids: bool | None = None,
+id_columns: str | list[str] | None = None,
+return_dataframe: Literal[True] = True,
+stream_chunk_size: int = 10000,
+num_workers: int = 0,
+) -> pd.DataFrame: ...
+
+@overload
+def predict(
+self,
+data: str | dict | pd.DataFrame | DataLoader,
+batch_size: int = 32,
+save_path: None = None,
+save_format: str = "csv",
+include_ids: bool | None = None,
+id_columns: str | list[str] | None = None,
+return_dataframe: Literal[False] = False,
+stream_chunk_size: int = 10000,
+num_workers: int = 0,
+) -> np.ndarray: ...
+
+@overload
+def predict(
+self,
+data: str | dict | pd.DataFrame | DataLoader,
+batch_size: int = 32,
+*,
+save_path: str | os.PathLike,
+save_format: str = "csv",
+include_ids: bool | None = None,
+id_columns: str | list[str] | None = None,
+return_dataframe: Literal[False] = False,
+stream_chunk_size: int = 10000,
+num_workers: int = 0,
+) -> Path: ...
+
 def predict(
 self,
 data: str | dict | pd.DataFrame | DataLoader,
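Note on the predict overloads added above: read together, the three signatures let a type checker narrow the return type from the call site. With return_dataframe=True the call resolves to a pandas DataFrame, with return_dataframe=False and no save_path it resolves to a numpy array, and with a keyword-only save_path it resolves to the Path of the written file. A minimal sketch of the three call shapes, assuming an already fitted model (the predict_three_ways helper and its arguments are illustrative, not part of the package):

    from pathlib import Path

    import numpy as np
    import pandas as pd


    def predict_three_ways(model, df: pd.DataFrame) -> None:
        # `model` stands in for any fitted NextRec BaseModel subclass.
        # Overload 1: return_dataframe=True -> pd.DataFrame of predictions.
        as_frame: pd.DataFrame = model.predict(df, batch_size=256, return_dataframe=True)

        # Overload 2: return_dataframe=False with no save_path -> np.ndarray.
        as_array: np.ndarray = model.predict(df, return_dataframe=False)

        # Overload 3: keyword-only save_path -> Path of the file written to disk.
        saved_to: Path = model.predict(
            df, save_path="predictions.csv", save_format="csv", return_dataframe=False
        )
        print(type(as_frame), as_array.shape, saved_to)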
@@ -2225,7 +2269,7 @@ class BaseMatchModel(BaseModel):
 dense_l2_reg: float = 0.0,
 target: list[str] | str | None = "label",
 id_columns: list[str] | str | None = None,
-task: str | list[str] | None = None,
+task: TaskTypeInput | list[TaskTypeInput] | None = None,
 session_id: str | None = None,
 distributed: bool = False,
 rank: int | None = None,
@@ -407,6 +407,8 @@ def train_model(train_config_path: str) -> None:
 use_swanlab=train_cfg.get("use_swanlab", False),
 wandb_api=train_cfg.get("wandb_api"),
 swanlab_api=train_cfg.get("swanlab_api"),
+wandb_kwargs=train_cfg.get("wandb_kwargs"),
+swanlab_kwargs=train_cfg.get("swanlab_kwargs"),
 log_interval=train_cfg.get("log_interval", 1),
 note=train_cfg.get("note"),
 )
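On the two new config keys read above: train_model now forwards wandb_kwargs and swanlab_kwargs from the parsed training config alongside the existing wandb_api/swanlab_api keys. A hedged sketch of what the parsed config dictionary might contain; only the key names come from this hunk, the example values and the assumption that the kwargs are passed through to the experiment-tracking backends are illustrative:

    # Hypothetical parsed train_cfg fragment; key names are taken from the hunk.
    train_cfg = {
        "use_swanlab": False,
        "wandb_api": "YOUR_WANDB_API_KEY",            # placeholder value
        "swanlab_api": None,
        "wandb_kwargs": {"project": "nextrec-demo"},  # forwarded as-is by train_model
        "swanlab_kwargs": None,
        "log_interval": 1,
        "note": "baseline run",
    }

    # As in the hunk, missing keys simply resolve to None:
    wandb_kwargs = train_cfg.get("wandb_kwargs")
    swanlab_kwargs = train_cfg.get("swanlab_kwargs")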
@@ -680,16 +682,22 @@ Examples:
 if not args.mode:
 parser.error("[NextRec CLI Error] --mode is required (train|predict)")
 
-if args.mode == "train":
-config_path = args.train_config
-if not config_path:
-parser.error("[NextRec CLI Error] train mode requires --train_config")
-train_model(config_path)
-else:
-config_path = args.predict_config
-if not config_path:
-parser.error("[NextRec CLI Error] predict mode requires --predict_config")
-predict_model(config_path)
+try:
+if args.mode == "train":
+config_path = args.train_config
+if not config_path:
+parser.error("[NextRec CLI Error] train mode requires --train_config")
+train_model(config_path)
+else:
+config_path = args.predict_config
+if not config_path:
+parser.error(
+"[NextRec CLI Error] predict mode requires --predict_config"
+)
+predict_model(config_path)
+except Exception:
+logging.getLogger(__name__).exception("[NextRec CLI Error] Unhandled exception")
+raise
 
 
 if __name__ == "__main__":
@@ -12,7 +12,7 @@ import torch
 
 
 def stack_section(batch: list[dict], section: Literal["features", "labels", "ids"]):
-"""
+"""
 input example:
 batch = [
 {"features": {"f1": tensor1, "f2": tensor2}, "labels": {"label": tensor3}},
@@ -24,7 +24,7 @@ def stack_section(batch: list[dict], section: Literal["features", "labels", "ids
 "f1": torch.stack([tensor1, tensor4], dim=0),
 "f2": torch.stack([tensor2, tensor5], dim=0),
 }
-
+
 """
 entries = [item.get(section) for item in batch if item.get(section) is not None]
 if not entries:
@@ -13,7 +13,7 @@ import logging
 import os
 import pickle
 from pathlib import Path
-from typing import Any, Dict, Literal, Optional, Union
+from typing import Any, Dict, Literal, Optional, Union, overload
 
 import numpy as np
 import pandas as pd
@@ -895,6 +895,28 @@ class DataProcessor(FeatureSet):
 )
 return self
 
+@overload
+def transform_in_memory(
+self,
+data: Union[pd.DataFrame, Dict[str, Any]],
+return_dict: Literal[True],
+persist: bool,
+save_format: Optional[str],
+output_path: Optional[str],
+warn_missing: bool = True,
+) -> Dict[str, np.ndarray]: ...
+
+@overload
+def transform_in_memory(
+self,
+data: Union[pd.DataFrame, Dict[str, Any]],
+return_dict: Literal[False],
+persist: bool,
+save_format: Optional[str],
+output_path: Optional[str],
+warn_missing: bool = True,
+) -> pd.DataFrame: ...
+
 def transform_in_memory(
 self,
 data: Union[pd.DataFrame, Dict[str, Any]],
@@ -1238,6 +1260,36 @@ class DataProcessor(FeatureSet):
 self.is_fitted = True
 return self
 
+@overload
+def transform(
+self,
+data: Union[pd.DataFrame, Dict[str, Any]],
+return_dict: Literal[True] = True,
+save_format: Optional[str] = None,
+output_path: Optional[str] = None,
+chunk_size: int = 200000,
+) -> Dict[str, np.ndarray]: ...
+
+@overload
+def transform(
+self,
+data: Union[pd.DataFrame, Dict[str, Any]],
+return_dict: Literal[False] = False,
+save_format: Optional[str] = None,
+output_path: Optional[str] = None,
+chunk_size: int = 200000,
+) -> pd.DataFrame: ...
+
+@overload
+def transform(
+self,
+data: str | os.PathLike,
+return_dict: Literal[False] = False,
+save_format: Optional[str] = None,
+output_path: Optional[str] = None,
+chunk_size: int = 200000,
+) -> list[str]: ...
+
 def transform(
 self,
 data: Union[pd.DataFrame, Dict[str, Any], str, os.PathLike],
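Note on the DataProcessor.transform overloads added above: per the three signatures, an in-memory DataFrame or dict comes back as either a dict of numpy arrays (return_dict=True) or a transformed DataFrame (return_dict=False), while a path-like input yields a list of strings. A minimal, hypothetical sketch assuming an already fitted processor (the transform_three_ways helper, the file names, and the claim that the returned strings are output file paths are assumptions, not part of the diff):

    import numpy as np
    import pandas as pd


    def transform_three_ways(processor, df: pd.DataFrame) -> None:
        # `processor` stands in for a fitted nextrec DataProcessor.
        # Overload 1: in-memory input, return_dict=True -> dict of numpy arrays.
        as_dict: dict[str, np.ndarray] = processor.transform(df, return_dict=True)

        # Overload 2: in-memory input, return_dict=False -> transformed DataFrame.
        as_frame: pd.DataFrame = processor.transform(df, return_dict=False)

        # Overload 3: path-like input -> list[str] (per the signature; presumably the
        # files written while processing in chunks).
        as_files: list[str] = processor.transform(
            "raw_data.csv", return_dict=False, save_format="parquet", chunk_size=100_000
        )
        print(len(as_dict), as_frame.shape, as_files)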
@@ -1,6 +1,6 @@
 """
 Date: create on 01/01/2026 - prerelease version: need to overwrite compute_loss later
-Checkpoint: edit on 01/01/2026
+Checkpoint: edit on 01/14/2026
 Author: Yang Zhou, zyaztec@gmail.com
 Reference:
 - [1] Xi D, Chen Z, Yan P, Zhang Y, Zhu Y, Zhuang F, Chen Y. Modeling the Sequential Dependence among Audience Multi-step Conversions with Multi-task Learning in Targeted Display Advertising. Proceedings of the 27th ACM SIGKDD Conference on Knowledge Discovery & Data Mining (KDD ’21), 2021, pp. 3745–3755.
@@ -20,7 +20,7 @@ from nextrec.basic.layers import MLP, EmbeddingLayer
 from nextrec.basic.heads import TaskHead
 from nextrec.basic.model import BaseModel
 from nextrec.utils.model import get_mlp_output_dim
-from nextrec.utils.types import TaskTypeName
+from nextrec.utils.types import TaskTypeInput
 
 
 class AITMTransfer(nn.Module):
@@ -76,7 +76,7 @@ class AITM(BaseModel):
 tower_mlp_params_list: list[dict] | None = None,
 calibrator_alpha: float = 0.1,
 target: list[str] | str | None = None,
-task: list[TaskTypeName] | None = None,
+task: list[TaskTypeInput] | None = None,
 **kwargs,
 ):
 dense_features = dense_features or []
@@ -1,6 +1,6 @@
 """
 Date: create on 01/01/2026 - prerelease version: still need to align with the source paper
-Checkpoint: edit on 01/01/2026
+Checkpoint: edit on 01/14/2026
 Author: Yang Zhou, zyaztec@gmail.com
 Reference:
 - [1] Ma J, Zhao Z, Chen J, Li A, Hong L, Chi EH. SNR: Sub-Network Routing for Flexible Parameter Sharing in Multi-Task Learning in E-Commerce by Exploiting Task Relationships in the Label Space. Proceedings of the 33rd AAAI Conference on Artificial Intelligence (AAAI 2019), 2019, pp. 216-223.
@@ -22,7 +22,7 @@ from nextrec.basic.features import DenseFeature, SequenceFeature, SparseFeature
 from nextrec.basic.layers import EmbeddingLayer, MLP
 from nextrec.basic.heads import TaskHead
 from nextrec.basic.model import BaseModel
-from nextrec.utils.types import TaskTypeName
+from nextrec.utils.types import TaskTypeInput, TaskTypeName
 
 
 class SNRTransGate(nn.Module):
@@ -101,7 +101,7 @@ class SNRTrans(BaseModel):
 num_experts: int = 4,
 tower_mlp_params_list: list[dict] | None = None,
 target: list[str] | str | None = None,
-task: TaskTypeName | list[TaskTypeName] | None = None,
+task: TaskTypeInput | list[TaskTypeInput] | None = None,
 **kwargs,
 ) -> None:
 dense_features = dense_features or []
@@ -1,6 +1,6 @@
 """
 Date: create on 01/01/2026 - prerelease version: still need to align with the source paper
-Checkpoint: edit on 01/01/2026
+Checkpoint: edit on 01/14/2026
 Author: Yang Zhou, zyaztec@gmail.com
 Reference:
 - [1] Sheng XR, Zhao L, Zhou G, Ding X, Dai B, Luo Q, Yang S, Lv J, Zhang C, Deng H, Zhu X. One Model to Serve All: Star Topology Adaptive Recommender for Multi-Domain CTR Prediction. arXiv preprint arXiv:2101.11427, 2021.
@@ -22,7 +22,7 @@ from nextrec.basic.features import DenseFeature, SequenceFeature, SparseFeature
 from nextrec.basic.heads import TaskHead
 from nextrec.basic.layers import DomainBatchNorm, EmbeddingLayer
 from nextrec.basic.model import BaseModel
-from nextrec.utils.types import TaskTypeName
+from nextrec.utils.types import TaskTypeInput, TaskTypeName
 
 
 class SharedSpecificLinear(nn.Module):
@@ -73,7 +73,7 @@ class STAR(BaseModel):
 sparse_features: list[SparseFeature] | None = None,
 sequence_features: list[SequenceFeature] | None = None,
 target: list[str] | str | None = None,
-task: TaskTypeName | list[TaskTypeName] | None = None,
+task: TaskTypeInput | list[TaskTypeInput] | None = None,
 mlp_params: dict | None = None,
 use_shared: bool = True,
 **kwargs,
@@ -1,6 +1,6 @@
 """
 Date: create on 01/01/2026
-Checkpoint: edit on 01/01/2026
+Checkpoint: edit on 01/14/2026
 Author: Yang Zhou, zyaztec@gmail.com
 Reference:
 - [1] Yan B, Wang P, Zhang K, Li F, Deng H, Xu J, Zheng B. APG: Adaptive Parameter Generation Network for Click-Through Rate Prediction. Advances in Neural Information Processing Systems 35 (NeurIPS 2022), 2022.
@@ -20,7 +20,7 @@ from nextrec.basic.layers import EmbeddingLayer, MLP
 from nextrec.basic.heads import TaskHead
 from nextrec.basic.model import BaseModel
 from nextrec.utils.model import select_features
-from nextrec.utils.types import ActivationName, TaskTypeName
+from nextrec.utils.types import ActivationName, TaskTypeInput, TaskTypeName
 
 
 class APGLayer(nn.Module):
@@ -233,7 +233,7 @@ class APG(BaseModel):
 sparse_features: list[SparseFeature] | None = None,
 sequence_features: list[SequenceFeature] | None = None,
 target: list[str] | str | None = None,
-task: TaskTypeName | list[TaskTypeName] | None = None,
+task: TaskTypeInput | list[TaskTypeInput] | None = None,
 mlp_params: dict | None = None,
 inner_activation: ActivationName | None = None,
 generate_activation: ActivationName | None = None,
@@ -1,6 +1,6 @@
 """
 Date: create on 01/01/2026
-Checkpoint: edit on 01/01/2026
+Checkpoint: edit on 01/14/2026
 Author: Yang Zhou, zyaztec@gmail.com
 Reference:
 - [1] Misra I, Shrivastava A, Gupta A, Hebert M. Cross-Stitch Networks for Multi-Task Learning. Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2016), 2016, pp. 3994–4003.
@@ -21,7 +21,7 @@ from nextrec.basic.features import DenseFeature, SequenceFeature, SparseFeature
 from nextrec.basic.layers import EmbeddingLayer, MLP
 from nextrec.basic.heads import TaskHead
 from nextrec.basic.model import BaseModel
-from nextrec.utils.types import TaskTypeName
+from nextrec.utils.types import TaskTypeInput, TaskTypeName
 
 
 class CrossStitchLayer(nn.Module):
@@ -76,7 +76,7 @@ class CrossStitch(BaseModel):
 sparse_features: list[SparseFeature] | None = None,
 sequence_features: list[SequenceFeature] | None = None,
 target: list[str] | str | None = None,
-task: TaskTypeName | list[TaskTypeName] | None = None,
+task: TaskTypeInput | list[TaskTypeInput] | None = None,
 shared_mlp_params: dict | None = None,
 task_mlp_params: dict | None = None,
 tower_mlp_params: dict | None = None,
@@ -1,6 +1,6 @@
 """
 Date: create on 01/01/2026
-Checkpoint: edit on 01/01/2026
+Checkpoint: edit on 01/14/2026
 Author: Yang Zhou, zyaztec@gmail.com
 Reference:
 - [1] Wang H, Chang T-W, Liu T, Huang J, Chen Z, Yu C, Li R, Chu W. ESCM²: Entire Space Counterfactual Multi-Task Model for Post-Click Conversion Rate Estimation. Proceedings of the 45th International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR ’22), 2022:363–372.
@@ -23,7 +23,7 @@ from nextrec.basic.layers import EmbeddingLayer, MLP
 from nextrec.basic.model import BaseModel
 from nextrec.loss.grad_norm import get_grad_norm_shared_params
 from nextrec.utils.model import compute_ranking_loss
-from nextrec.utils.types import TaskTypeName
+from nextrec.utils.types import TaskTypeInput, TaskTypeName
 
 
 class ESCM(BaseModel):
@@ -52,7 +52,7 @@ class ESCM(BaseModel):
 imp_mlp_params: dict | None = None,
 use_dr: bool = False,
 target: list[str] | str | None = None,
-task: TaskTypeName | list[TaskTypeName] | None = None,
+task: TaskTypeInput | list[TaskTypeInput] | None = None,
 **kwargs,
 ) -> None:
 dense_features = dense_features or []
@@ -1,6 +1,6 @@
 """
 Date: create on 09/11/2025
-Checkpoint: edit on 23/12/2025
+Checkpoint: edit on 01/14/2026
 Author: Yang Zhou,zyaztec@gmail.com
 Reference:
 - [1] Ma X, Zhao L, Huang G, Wang Z, Hu Z, Zhu X, Gai K. Entire Space Multi-Task Model: An Effective Approach for Estimating Post-Click Conversion Rate. In: Proceedings of the 41st International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR ’18), 2018, pp. 1137–1140.
@@ -46,7 +46,7 @@ from nextrec.basic.features import DenseFeature, SequenceFeature, SparseFeature
 from nextrec.basic.layers import MLP, EmbeddingLayer
 from nextrec.basic.heads import TaskHead
 from nextrec.basic.model import BaseModel
-from nextrec.utils.types import TaskTypeName
+from nextrec.utils.types import TaskTypeInput
 
 
 class ESMM(BaseModel):
@@ -76,7 +76,7 @@ class ESMM(BaseModel):
 sequence_features: list[SequenceFeature],
 ctr_mlp_params: dict,
 cvr_mlp_params: dict,
-task: list[TaskTypeName] | None = None,
+task: list[TaskTypeInput] | None = None,
 target: list[str] | None = None, # Note: ctcvr = ctr * cvr
 **kwargs,
 ):
@@ -1,6 +1,6 @@
 """
 Date: create on 01/01/2026
-Checkpoint: edit on 01/01/2026
+Checkpoint: edit on 01/14/2026
 Author: Yang Zhou, zyaztec@gmail.com
 [1] Zhao Z, Liu Y, Jin R, Zhu X, He X. HMOE: Improving Multi-Scenario Learning to Rank in E-commerce by Exploiting Task Relationships in the Label Space. Proceedings of the 29th ACM International Conference on Information & Knowledge Management (CIKM ’20), 2020, pp. 2069–2078.
 URL: https://dl.acm.org/doi/10.1145/3340531.3412713
@@ -23,7 +23,7 @@ from nextrec.basic.layers import MLP, EmbeddingLayer
 from nextrec.basic.heads import TaskHead
 from nextrec.basic.model import BaseModel
 from nextrec.utils.model import get_mlp_output_dim
-from nextrec.utils.types import TaskTypeName
+from nextrec.utils.types import TaskTypeInput, TaskTypeName
 
 
 class HMOE(BaseModel):
@@ -53,7 +53,7 @@ class HMOE(BaseModel):
 tower_mlp_params_list: list[dict] | None = None,
 task_weight_mlp_params: list[dict] | None = None,
 target: list[str] | str | None = None,
-task: TaskTypeName | list[TaskTypeName] | None = None,
+task: TaskTypeInput | list[TaskTypeInput] | None = None,
 **kwargs,
 ) -> None:
 dense_features = dense_features or []
@@ -1,6 +1,6 @@
 """
 Date: create on 09/11/2025
-Checkpoint: edit on 23/12/2025
+Checkpoint: edit on 01/14/2026
 Author: Yang Zhou,zyaztec@gmail.com
 Reference:
 - [1] Ma J, Zhao Z, Yi X, Chen J, Hong L, Chi E H. Modeling Task Relationships in Multi-task Learning with Multi-gate Mixture-of-Experts. In: Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining (KDD ’18), 2018, pp. 1930–1939.
@@ -48,7 +48,7 @@ from nextrec.basic.features import DenseFeature, SequenceFeature, SparseFeature
 from nextrec.basic.layers import MLP, EmbeddingLayer
 from nextrec.basic.heads import TaskHead
 from nextrec.basic.model import BaseModel
-from nextrec.utils.types import TaskTypeName
+from nextrec.utils.types import TaskTypeInput
 
 
 class MMOE(BaseModel):
@@ -81,7 +81,7 @@ class MMOE(BaseModel):
 num_experts: int = 3,
 tower_mlp_params_list: list[dict] | None = None,
 target: list[str] | str | None = None,
-task: TaskTypeName | list[TaskTypeName] | None = None,
+task: TaskTypeInput | list[TaskTypeInput] | None = None,
 **kwargs,
 ):
 
@@ -1,6 +1,6 @@
 """
 Date: create on 01/01/2026
-Checkpoint: edit on 01/01/2026
+Checkpoint: edit on 01/14/2026
 Author: Yang Zhou, zyaztec@gmail.com
 Reference:
 - [1] Chang J, Zhang C, Hui Y, Leng D, Niu Y, Song Y, Gai K. PEPNet: Parameter and Embedding Personalized Network for Infusing with Personalized Prior Information. In: Proceedings of the 29th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining (KDD ’23), 2023.
@@ -58,7 +58,7 @@ from nextrec.basic.layers import EmbeddingLayer, GateMLP
 from nextrec.basic.heads import TaskHead
 from nextrec.basic.model import BaseModel
 from nextrec.utils.model import select_features
-from nextrec.utils.types import TaskTypeName
+from nextrec.utils.types import TaskTypeInput, TaskTypeName
 
 
 class PPNet(nn.Module):
@@ -184,7 +184,7 @@ class PEPNet(BaseModel):
 sparse_features: list[SparseFeature] | None = None,
 sequence_features: list[SequenceFeature] | None = None,
 target: list[str] | str | None = None,
-task: TaskTypeName | list[TaskTypeName] | None = None,
+task: TaskTypeInput | list[TaskTypeInput] | None = None,
 mlp_params: dict | None = None,
 feature_gate_mlp_params: dict | None = None,
 gate_mlp_params: dict | None = None,
@@ -334,7 +334,7 @@ class PEPNet(BaseModel):
 
 task_logits = []
 for block in self.ppnet_blocks:
-task_logits.append(block(o_ep=dnn_input, o_prior=task_sf_emb))
+task_logits.append(block(o_ep=dnn_input, o_prior=task_sf_emb))
 
 y = torch.cat(task_logits, dim=1)
 return self.prediction_layer(y)
@@ -1,6 +1,6 @@
 """
 Date: create on 09/11/2025
-Checkpoint: edit on 23/12/2025
+Checkpoint: edit on 01/14/2026
 Author: Yang Zhou,zyaztec@gmail.com
 Reference:
 - [1] Tang H, Liu J, Zhao M, Gong X. Progressive Layered Extraction (PLE): A Novel Multi-Task Learning (MTL) Model for Personalized Recommendations. In: Proceedings of the 14th ACM Conference on Recommender Systems (RecSys ’20), 2020, pp. 269–278.
@@ -52,7 +52,7 @@ from nextrec.basic.layers import MLP, EmbeddingLayer
 from nextrec.basic.heads import TaskHead
 from nextrec.basic.model import BaseModel
 from nextrec.utils.model import get_mlp_output_dim
-
+from nextrec.utils.types import TaskTypeInput
 
 class CGCLayer(nn.Module):
 """
@@ -202,7 +202,7 @@ class PLE(BaseModel):
 num_levels: int = 2,
 tower_mlp_params_list: list[dict] | None = None,
 target: list[str] | None = None,
-task: str | list[str] | None = None,
+task: TaskTypeInput | list[TaskTypeInput] | None = None,
 **kwargs,
 ):