nextrec-0.1.11-py3-none-any.whl → nextrec-0.2.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52)
  1. nextrec/__version__.py +1 -1
  2. nextrec/basic/activation.py +1 -2
  3. nextrec/basic/callback.py +1 -2
  4. nextrec/basic/features.py +39 -8
  5. nextrec/basic/layers.py +3 -4
  6. nextrec/basic/loggers.py +15 -10
  7. nextrec/basic/metrics.py +1 -2
  8. nextrec/basic/model.py +160 -125
  9. nextrec/basic/session.py +150 -0
  10. nextrec/data/__init__.py +13 -2
  11. nextrec/data/data_utils.py +74 -22
  12. nextrec/data/dataloader.py +513 -0
  13. nextrec/data/preprocessor.py +494 -134
  14. nextrec/loss/__init__.py +31 -24
  15. nextrec/loss/listwise.py +164 -0
  16. nextrec/loss/loss_utils.py +133 -106
  17. nextrec/loss/pairwise.py +105 -0
  18. nextrec/loss/pointwise.py +198 -0
  19. nextrec/models/match/dssm.py +26 -17
  20. nextrec/models/match/dssm_v2.py +20 -2
  21. nextrec/models/match/mind.py +18 -3
  22. nextrec/models/match/sdm.py +17 -2
  23. nextrec/models/match/youtube_dnn.py +23 -10
  24. nextrec/models/multi_task/esmm.py +8 -8
  25. nextrec/models/multi_task/mmoe.py +8 -8
  26. nextrec/models/multi_task/ple.py +8 -8
  27. nextrec/models/multi_task/share_bottom.py +8 -8
  28. nextrec/models/ranking/__init__.py +8 -0
  29. nextrec/models/ranking/afm.py +5 -4
  30. nextrec/models/ranking/autoint.py +6 -4
  31. nextrec/models/ranking/dcn.py +6 -4
  32. nextrec/models/ranking/deepfm.py +5 -4
  33. nextrec/models/ranking/dien.py +6 -4
  34. nextrec/models/ranking/din.py +6 -4
  35. nextrec/models/ranking/fibinet.py +6 -4
  36. nextrec/models/ranking/fm.py +6 -4
  37. nextrec/models/ranking/masknet.py +6 -4
  38. nextrec/models/ranking/pnn.py +6 -4
  39. nextrec/models/ranking/widedeep.py +6 -4
  40. nextrec/models/ranking/xdeepfm.py +6 -4
  41. nextrec/utils/__init__.py +7 -11
  42. nextrec/utils/embedding.py +2 -4
  43. nextrec/utils/initializer.py +4 -5
  44. nextrec/utils/optimizer.py +7 -8
  45. {nextrec-0.1.11.dist-info → nextrec-0.2.2.dist-info}/METADATA +3 -3
  46. nextrec-0.2.2.dist-info/RECORD +53 -0
  47. nextrec/basic/dataloader.py +0 -447
  48. nextrec/loss/match_losses.py +0 -294
  49. nextrec/utils/common.py +0 -14
  50. nextrec-0.1.11.dist-info/RECORD +0 -51
  51. {nextrec-0.1.11.dist-info → nextrec-0.2.2.dist-info}/WHEEL +0 -0
  52. {nextrec-0.1.11.dist-info → nextrec-0.2.2.dist-info}/licenses/LICENSE +0 -0
nextrec/models/multi_task/share_bottom.py CHANGED
@@ -1,9 +1,7 @@
 """
 Date: create on 09/11/2025
-Author:
-Yang Zhou,zyaztec@gmail.com
-Reference:
-[1] Caruana R. Multitask learning[J]. Machine learning, 1997, 28: 41-75.
+Author: Yang Zhou,zyaztec@gmail.com
+Reference: [1] Caruana R. Multitask learning[J]. Machine learning, 1997, 28: 41-75.
 """
 
 import torch
@@ -35,12 +33,13 @@ class ShareBottom(BaseModel):
         optimizer: str = "adam",
         optimizer_params: dict = {},
         loss: str | nn.Module | list[str | nn.Module] | None = "bce",
+        loss_params: dict | list[dict] | None = None,
         device: str = 'cpu',
-        model_id: str = "baseline",
         embedding_l1_reg=1e-6,
         dense_l1_reg=1e-5,
         embedding_l2_reg=1e-5,
-        dense_l2_reg=1e-4):
+        dense_l2_reg=1e-4,
+        **kwargs):
 
         super(ShareBottom, self).__init__(
             dense_features=dense_features,
@@ -54,7 +53,7 @@ class ShareBottom(BaseModel):
             embedding_l2_reg=embedding_l2_reg,
             dense_l2_reg=dense_l2_reg,
             early_stop_patience=20,
-            model_id=model_id
+            **kwargs
         )
 
         self.loss = loss
@@ -105,7 +104,8 @@ class ShareBottom(BaseModel):
         self.compile(
             optimizer=optimizer,
             optimizer_params=optimizer_params,
-            loss=loss
+            loss=loss,
+            loss_params=loss_params,
         )
 
     def forward(self, x):
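The same two changes repeat across every model diff below: a new `loss_params` argument forwarded to `compile()`, and removal of the hard-coded `model_id` parameter in favor of `**kwargs` passed through to `BaseModel` (the README diff at the bottom suggests the new keyword is `session_id`). A minimal, hypothetical sketch of the new call shape, assuming the remaining `ShareBottom` arguments keep their 0.1.x meaning; the feature lists, targets, and the `loss_params` value here are placeholders, not values taken from the diff:

```python
# Hypothetical usage of the 0.2.2 signature shown above; dense_features,
# sparse_features, and target are placeholders for real feature configs.
from nextrec.models.multi_task import ShareBottom

model = ShareBottom(
    dense_features=dense_features,
    sparse_features=sparse_features,
    target=["click", "conversion"],       # placeholder task targets
    loss="bce",
    loss_params={"reduction": "mean"},    # new in 0.2.2, forwarded to compile()
    session_id="share_bottom_baseline",   # replaces model_id, absorbed via **kwargs
    device="cpu",
)
```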
nextrec/models/ranking/__init__.py CHANGED
@@ -1,3 +1,7 @@
+from .fm import FM
+from .afm import AFM
+from .masknet import MaskNet
+from .pnn import PNN
 from .deepfm import DeepFM
 from .autoint import AutoInt
 from .widedeep import WideDeep
@@ -14,4 +18,8 @@ __all__ = [
    'DCN',
    'DIN',
    'DIEN',
+    'FM',
+    'AFM',
+    'MaskNet',
+    'PNN',
 ]
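With these four classes re-exported, the ranking models can be imported straight from the subpackage root, alongside the existing exports:

```python
# New in 0.2.2: FM, AFM, MaskNet, and PNN join the existing top-level exports.
from nextrec.models.ranking import FM, AFM, MaskNet, PNN, DeepFM, AutoInt, WideDeep
```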
nextrec/models/ranking/afm.py CHANGED
@@ -34,12 +34,12 @@ class AFM(BaseModel):
         optimizer: str = "adam",
         optimizer_params: dict = {},
         loss: str | nn.Module | None = "bce",
+        loss_params: dict | list[dict] | None = None,
         device: str = 'cpu',
-        model_id: str = "baseline",
         embedding_l1_reg=1e-6,
         dense_l1_reg=1e-5,
         embedding_l2_reg=1e-5,
-        dense_l2_reg=1e-4):
+        dense_l2_reg=1e-4, **kwargs):
 
         super(AFM, self).__init__(
             dense_features=dense_features,
@@ -53,7 +53,7 @@ class AFM(BaseModel):
             embedding_l2_reg=embedding_l2_reg,
             dense_l2_reg=dense_l2_reg,
             early_stop_patience=20,
-            model_id=model_id
+            **kwargs
         )
 
         self.loss = loss
@@ -89,7 +89,8 @@ class AFM(BaseModel):
         self.compile(
             optimizer=optimizer,
             optimizer_params=optimizer_params,
-            loss=loss
+            loss=loss,
+            loss_params=loss_params,
         )
 
     def forward(self, x):
nextrec/models/ranking/autoint.py CHANGED
@@ -39,12 +39,13 @@ class AutoInt(BaseModel):
         optimizer: str = "adam",
         optimizer_params: dict = {},
         loss: str | nn.Module | None = "bce",
+        loss_params: dict | list[dict] | None = None,
         device: str = 'cpu',
-        model_id: str = "baseline",
         embedding_l1_reg=1e-6,
         dense_l1_reg=1e-5,
         embedding_l2_reg=1e-5,
-        dense_l2_reg=1e-4):
+        dense_l2_reg=1e-4,
+        **kwargs):
 
         super(AutoInt, self).__init__(
             dense_features=dense_features,
@@ -58,7 +59,7 @@ class AutoInt(BaseModel):
             embedding_l2_reg=embedding_l2_reg,
             dense_l2_reg=dense_l2_reg,
             early_stop_patience=20,
-            model_id=model_id
+            **kwargs
         )
 
         self.loss = loss
@@ -113,7 +114,8 @@ class AutoInt(BaseModel):
         self.compile(
             optimizer=optimizer,
             optimizer_params=optimizer_params,
-            loss=loss
+            loss=loss,
+            loss_params=loss_params,
         )
 
     def forward(self, x):
nextrec/models/ranking/dcn.py CHANGED
@@ -35,12 +35,13 @@ class DCN(BaseModel):
         optimizer: str = "adam",
         optimizer_params: dict = {},
         loss: str | nn.Module | None = "bce",
+        loss_params: dict | list[dict] | None = None,
         device: str = 'cpu',
-        model_id: str = "baseline",
         embedding_l1_reg=1e-6,
         dense_l1_reg=1e-5,
         embedding_l2_reg=1e-5,
-        dense_l2_reg=1e-4):
+        dense_l2_reg=1e-4,
+        **kwargs):
 
         super(DCN, self).__init__(
             dense_features=dense_features,
@@ -54,7 +55,7 @@ class DCN(BaseModel):
             embedding_l2_reg=embedding_l2_reg,
             dense_l2_reg=dense_l2_reg,
             early_stop_patience=20,
-            model_id=model_id
+            **kwargs
         )
 
         self.loss = loss
@@ -97,7 +98,8 @@ class DCN(BaseModel):
         self.compile(
             optimizer=optimizer,
             optimizer_params=optimizer_params,
-            loss=loss
+            loss=loss,
+            loss_params=loss_params,
         )
 
     def forward(self, x):
nextrec/models/ranking/deepfm.py CHANGED
@@ -31,12 +31,12 @@ class DeepFM(BaseModel):
         optimizer: str = "adam",
         optimizer_params: dict = {},
         loss: str | nn.Module | None = "bce",
+        loss_params: dict | list[dict] | None = None,
         device: str = 'cpu',
-        model_id: str = "baseline",
         embedding_l1_reg=1e-6,
         dense_l1_reg=1e-5,
         embedding_l2_reg=1e-5,
-        dense_l2_reg=1e-4):
+        dense_l2_reg=1e-4, **kwargs):
 
         super(DeepFM, self).__init__(
             dense_features=dense_features,
@@ -50,7 +50,7 @@ class DeepFM(BaseModel):
             embedding_l2_reg=embedding_l2_reg,
             dense_l2_reg=dense_l2_reg,
             early_stop_patience=20,
-            model_id=model_id
+            **kwargs
         )
 
         self.loss = loss
@@ -80,7 +80,8 @@ class DeepFM(BaseModel):
         self.compile(
             optimizer=optimizer,
             optimizer_params=optimizer_params,
-            loss=loss
+            loss=loss,
+            loss_params=loss_params,
         )
 
     def forward(self, x):
nextrec/models/ranking/dien.py CHANGED
@@ -38,12 +38,13 @@ class DIEN(BaseModel):
         optimizer: str = "adam",
         optimizer_params: dict = {},
         loss: str | nn.Module | None = "bce",
+        loss_params: dict | list[dict] | None = None,
         device: str = 'cpu',
-        model_id: str = "baseline",
         embedding_l1_reg=1e-6,
         dense_l1_reg=1e-5,
         embedding_l2_reg=1e-5,
-        dense_l2_reg=1e-4):
+        dense_l2_reg=1e-4,
+        **kwargs):
 
         super(DIEN, self).__init__(
             dense_features=dense_features,
@@ -57,7 +58,7 @@ class DIEN(BaseModel):
             embedding_l2_reg=embedding_l2_reg,
             dense_l2_reg=dense_l2_reg,
             early_stop_patience=20,
-            model_id=model_id
+            **kwargs
         )
 
         self.loss = loss
@@ -128,7 +129,8 @@ class DIEN(BaseModel):
         self.compile(
             optimizer=optimizer,
             optimizer_params=optimizer_params,
-            loss=loss
+            loss=loss,
+            loss_params=loss_params,
         )
 
     def forward(self, x):
nextrec/models/ranking/din.py CHANGED
@@ -37,12 +37,13 @@ class DIN(BaseModel):
         optimizer: str = "adam",
         optimizer_params: dict = {},
         loss: str | nn.Module | None = "bce",
+        loss_params: dict | list[dict] | None = None,
         device: str = 'cpu',
-        model_id: str = "baseline",
         embedding_l1_reg=1e-6,
         dense_l1_reg=1e-5,
         embedding_l2_reg=1e-5,
-        dense_l2_reg=1e-4):
+        dense_l2_reg=1e-4,
+        **kwargs):
 
         super(DIN, self).__init__(
             dense_features=dense_features,
@@ -56,7 +57,7 @@ class DIN(BaseModel):
             embedding_l2_reg=embedding_l2_reg,
             dense_l2_reg=dense_l2_reg,
             early_stop_patience=20,
-            model_id=model_id
+            **kwargs
         )
 
         self.loss = loss
@@ -115,7 +116,8 @@ class DIN(BaseModel):
         self.compile(
             optimizer=optimizer,
             optimizer_params=optimizer_params,
-            loss=loss
+            loss=loss,
+            loss_params=loss_params,
         )
 
     def forward(self, x):
nextrec/models/ranking/fibinet.py CHANGED
@@ -42,12 +42,13 @@ class FiBiNET(BaseModel):
         optimizer: str = "adam",
         optimizer_params: dict = {},
         loss: str | nn.Module | None = "bce",
+        loss_params: dict | list[dict] | None = None,
         device: str = 'cpu',
-        model_id: str = "baseline",
         embedding_l1_reg=1e-6,
         dense_l1_reg=1e-5,
         embedding_l2_reg=1e-5,
-        dense_l2_reg=1e-4):
+        dense_l2_reg=1e-4,
+        **kwargs):
 
         super(FiBiNET, self).__init__(
             dense_features=dense_features,
@@ -61,7 +62,7 @@ class FiBiNET(BaseModel):
             embedding_l2_reg=embedding_l2_reg,
             dense_l2_reg=dense_l2_reg,
             early_stop_patience=20,
-            model_id=model_id
+            **kwargs
         )
 
         self.loss = loss
@@ -111,7 +112,8 @@ class FiBiNET(BaseModel):
         self.compile(
             optimizer=optimizer,
             optimizer_params=optimizer_params,
-            loss=loss
+            loss=loss,
+            loss_params=loss_params,
         )
 
     def forward(self, x):
nextrec/models/ranking/fm.py CHANGED
@@ -30,12 +30,13 @@ class FM(BaseModel):
         optimizer: str = "adam",
         optimizer_params: dict = {},
         loss: str | nn.Module | None = "bce",
+        loss_params: dict | list[dict] | None = None,
         device: str = 'cpu',
-        model_id: str = "baseline",
         embedding_l1_reg=1e-6,
         dense_l1_reg=1e-5,
         embedding_l2_reg=1e-5,
-        dense_l2_reg=1e-4):
+        dense_l2_reg=1e-4,
+        **kwargs):
 
         super(FM, self).__init__(
             dense_features=dense_features,
@@ -49,7 +50,7 @@ class FM(BaseModel):
             embedding_l2_reg=embedding_l2_reg,
             dense_l2_reg=dense_l2_reg,
             early_stop_patience=20,
-            model_id=model_id
+            **kwargs
         )
 
         self.loss = loss
@@ -76,7 +77,8 @@ class FM(BaseModel):
         self.compile(
             optimizer=optimizer,
             optimizer_params=optimizer_params,
-            loss=loss
+            loss=loss,
+            loss_params=loss_params,
         )
 
     def forward(self, x):
nextrec/models/ranking/masknet.py CHANGED
@@ -36,12 +36,13 @@ class MaskNet(BaseModel):
         optimizer: str = "adam",
         optimizer_params: dict = {},
         loss: str | nn.Module | None = "bce",
+        loss_params: dict | list[dict] | None = None,
         device: str = 'cpu',
-        model_id: str = "baseline",
         embedding_l1_reg=1e-6,
         dense_l1_reg=1e-5,
         embedding_l2_reg=1e-5,
-        dense_l2_reg=1e-4):
+        dense_l2_reg=1e-4,
+        **kwargs):
 
         super(MaskNet, self).__init__(
             dense_features=dense_features,
@@ -55,7 +56,7 @@ class MaskNet(BaseModel):
             embedding_l2_reg=embedding_l2_reg,
             dense_l2_reg=dense_l2_reg,
             early_stop_patience=20,
-            model_id=model_id
+            **kwargs
         )
 
         self.loss = loss
@@ -98,7 +99,8 @@ class MaskNet(BaseModel):
         self.compile(
             optimizer=optimizer,
             optimizer_params=optimizer_params,
-            loss=loss
+            loss=loss,
+            loss_params=loss_params,
         )
 
     def forward(self, x):
nextrec/models/ranking/pnn.py CHANGED
@@ -34,12 +34,13 @@ class PNN(BaseModel):
         optimizer: str = "adam",
         optimizer_params: dict = {},
         loss: str | nn.Module | None = "bce",
+        loss_params: dict | list[dict] | None = None,
         device: str = 'cpu',
-        model_id: str = "baseline",
         embedding_l1_reg=1e-6,
         dense_l1_reg=1e-5,
         embedding_l2_reg=1e-5,
-        dense_l2_reg=1e-4):
+        dense_l2_reg=1e-4,
+        **kwargs):
 
         super(PNN, self).__init__(
             dense_features=dense_features,
@@ -53,7 +54,7 @@ class PNN(BaseModel):
             embedding_l2_reg=embedding_l2_reg,
             dense_l2_reg=dense_l2_reg,
             early_stop_patience=20,
-            model_id=model_id
+            **kwargs
         )
 
         self.loss = loss
@@ -98,7 +99,8 @@ class PNN(BaseModel):
         self.compile(
             optimizer=optimizer,
             optimizer_params=optimizer_params,
-            loss=loss
+            loss=loss,
+            loss_params=loss_params,
         )
 
     def forward(self, x):
nextrec/models/ranking/widedeep.py CHANGED
@@ -34,12 +34,13 @@ class WideDeep(BaseModel):
         optimizer: str = "adam",
         optimizer_params: dict = {},
         loss: str | nn.Module | None = "bce",
+        loss_params: dict | list[dict] | None = None,
         device: str = 'cpu',
-        model_id: str = "baseline",
         embedding_l1_reg=1e-6,
         dense_l1_reg=1e-5,
         embedding_l2_reg=1e-5,
-        dense_l2_reg=1e-4):
+        dense_l2_reg=1e-4,
+        **kwargs):
 
         super(WideDeep, self).__init__(
             dense_features=dense_features,
@@ -53,7 +54,7 @@ class WideDeep(BaseModel):
             embedding_l2_reg=embedding_l2_reg,
             dense_l2_reg=dense_l2_reg,
             early_stop_patience=20,
-            model_id=model_id
+            **kwargs
         )
 
         self.loss = loss
@@ -88,7 +89,8 @@ class WideDeep(BaseModel):
         self.compile(
             optimizer=optimizer,
             optimizer_params=optimizer_params,
-            loss=loss
+            loss=loss,
+            loss_params=loss_params,
         )
 
     def forward(self, x):
nextrec/models/ranking/xdeepfm.py CHANGED
@@ -37,12 +37,13 @@ class xDeepFM(BaseModel):
         optimizer: str = "adam",
         optimizer_params: dict = {},
         loss: str | nn.Module | None = "bce",
+        loss_params: dict | list[dict] | None = None,
         device: str = 'cpu',
-        model_id: str = "baseline",
         embedding_l1_reg=1e-6,
         dense_l1_reg=1e-5,
         embedding_l2_reg=1e-5,
-        dense_l2_reg=1e-4):
+        dense_l2_reg=1e-4,
+        **kwargs):
 
         super(xDeepFM, self).__init__(
             dense_features=dense_features,
@@ -56,7 +57,7 @@ class xDeepFM(BaseModel):
             embedding_l2_reg=embedding_l2_reg,
             dense_l2_reg=dense_l2_reg,
             early_stop_patience=20,
-            model_id=model_id
+            **kwargs
        )
 
         self.loss = loss
@@ -95,7 +96,8 @@ class xDeepFM(BaseModel):
         self.compile(
             optimizer=optimizer,
             optimizer_params=optimizer_params,
-            loss=loss
+            loss=loss,
+            loss_params=loss_params,
         )
 
     def forward(self, x):
nextrec/utils/__init__.py CHANGED
@@ -1,18 +1,14 @@
-from nextrec.utils.optimizer import get_optimizer_fn, get_scheduler_fn
-from nextrec.utils.initializer import get_initializer_fn
-from nextrec.utils.embedding import get_auto_embedding_dim
-from nextrec.utils.common import get_task_type
-
-from nextrec.utils import optimizer, initializer, embedding, common
+from .optimizer import get_optimizer, get_scheduler
+from .initializer import get_initializer
+from .embedding import get_auto_embedding_dim
+from . import optimizer, initializer, embedding
 
 __all__ = [
-    'get_optimizer_fn',
-    'get_scheduler_fn',
-    'get_initializer_fn',
+    'get_optimizer',
+    'get_scheduler',
+    'get_initializer',
     'get_auto_embedding_dim',
-    'get_task_type',
     'optimizer',
     'initializer',
     'embedding',
-    'common',
 ]
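For code written against 0.1.x, the utils changes amount to three renames (the `_fn` suffix is dropped) plus the removal of `get_task_type` and the `common` module. A sketch of the import-level migration:

```python
# 0.1.11 (old names, removed in 0.2.2):
#   from nextrec.utils import get_optimizer_fn, get_scheduler_fn, get_initializer_fn
# 0.2.2:
from nextrec.utils import get_optimizer, get_scheduler, get_initializer
```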
nextrec/utils/embedding.py CHANGED
@@ -2,8 +2,7 @@
 Embedding utilities for NextRec
 
 Date: create on 13/11/2025
-Author:
-Yang Zhou, zyaztec@gmail.com
+Author: Yang Zhou, zyaztec@gmail.com
 """
 
 import numpy as np
@@ -13,7 +12,6 @@ def get_auto_embedding_dim(num_classes: int) -> int:
     """
     Calculate the dim of embedding vector according to number of classes in the category.
     Formula: emb_dim = [6 * (num_classes)^(1/4)]
-    Reference:
-    Deep & Cross Network for Ad Click Predictions.(ADKDD'17)
+    Reference: Deep & Cross Network for Ad Click Predictions.(ADKDD'17)
     """
     return int(np.floor(6 * np.power(num_classes, 0.25)))
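The auto-dimension rule itself is unchanged; only the docstring layout differs. As a quick check of the formula emb_dim = floor(6 * num_classes^(1/4)):

```python
from nextrec.utils import get_auto_embedding_dim

assert get_auto_embedding_dim(10_000) == 60  # 6 * 10000**0.25 = 6 * 10
assert get_auto_embedding_dim(500) == 28     # floor(6 * 500**0.25) = floor(28.37)
```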
nextrec/utils/initializer.py CHANGED
@@ -2,21 +2,20 @@
 Initialization utilities for NextRec
 
 Date: create on 13/11/2025
-Author:
-Yang Zhou, zyaztec@gmail.com
+Author: Yang Zhou, zyaztec@gmail.com
 """
 
 import torch.nn as nn
 
 
-def get_initializer_fn(init_type='normal', activation='linear', param=None):
+def get_initializer(init_type='normal', activation='linear', param=None):
     """
     Get parameter initialization function.
 
     Examples:
-    >>> init_fn = get_initializer_fn('xavier_uniform', 'relu')
+    >>> init_fn = get_initializer('xavier_uniform', 'relu')
     >>> init_fn(tensor)
-    >>> init_fn = get_initializer_fn('normal', param={'mean': 0.0, 'std': 0.01})
+    >>> init_fn = get_initializer('normal', param={'mean': 0.0, 'std': 0.01})
     """
     param = param or {}
 
nextrec/utils/optimizer.py CHANGED
@@ -2,15 +2,14 @@
 Optimizer and Scheduler utilities for NextRec
 
 Date: create on 13/11/2025
-Author:
-Yang Zhou, zyaztec@gmail.com
+Author: Yang Zhou, zyaztec@gmail.com
 """
 
 import torch
 from typing import Iterable
 
 
-def get_optimizer_fn(
+def get_optimizer(
     optimizer: str = "adam",
     params: Iterable[torch.nn.Parameter] | None = None,
     **optimizer_params
@@ -19,8 +18,8 @@ def get_optimizer_fn(
     Get optimizer function based on optimizer name or instance.
 
     Examples:
-    >>> optimizer = get_optimizer_fn("adam", model.parameters(), lr=1e-3)
-    >>> optimizer = get_optimizer_fn("sgd", model.parameters(), lr=0.01, momentum=0.9)
+    >>> optimizer = get_optimizer("adam", model.parameters(), lr=1e-3)
+    >>> optimizer = get_optimizer("sgd", model.parameters(), lr=0.01, momentum=0.9)
     """
     if params is None:
         raise ValueError("params cannot be None. Please provide model parameters.")
@@ -52,13 +51,13 @@
     return optimizer_fn
 
 
-def get_scheduler_fn(scheduler, optimizer, **scheduler_params):
+def get_scheduler(scheduler, optimizer, **scheduler_params):
     """
     Get learning rate scheduler function.
 
     Examples:
-    >>> scheduler = get_scheduler_fn("step", optimizer, step_size=10, gamma=0.1)
-    >>> scheduler = get_scheduler_fn("cosine", optimizer, T_max=100)
+    >>> scheduler = get_scheduler("step", optimizer, step_size=10, gamma=0.1)
+    >>> scheduler = get_scheduler("cosine", optimizer, T_max=100)
     """
     if isinstance(scheduler, str):
         if scheduler == "step":
{nextrec-0.1.11.dist-info → nextrec-0.2.2.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: nextrec
-Version: 0.1.11
+Version: 0.2.2
 Summary: A comprehensive recommendation library with match, ranking, and multi-task learning models
 Project-URL: Homepage, https://github.com/zerolovesea/NextRec
 Project-URL: Repository, https://github.com/zerolovesea/NextRec
@@ -61,7 +61,7 @@ Description-Content-Type: text/markdown
 ![Python](https://img.shields.io/badge/Python-3.10+-blue.svg)
 ![PyTorch](https://img.shields.io/badge/PyTorch-1.10+-ee4c2c.svg)
 ![License](https://img.shields.io/badge/License-Apache%202.0-green.svg)
-![Version](https://img.shields.io/badge/Version-0.1.11-orange.svg)
+![Version](https://img.shields.io/badge/Version-0.2.2-orange.svg)
 
 English | [中文版](README_zh.md)
 
@@ -123,7 +123,7 @@ model = DeepFM(
     mlp_params={"dims": [256, 128], "activation": "relu", "dropout": 0.5},
     target=target,
     device='cpu',
-    model_id="deepfm_with_processor",
+    session_id="deepfm_with_processor",
     embedding_l1_reg=1e-6,
     dense_l1_reg=1e-5,
     embedding_l2_reg=1e-5,
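The README excerpt makes the user-facing rename concrete: the keyword `model_id` becomes `session_id` in 0.2.2 (handled, presumably, by the new `nextrec/basic/session.py`). A hypothetical minimal migration, with the unchanged constructor arguments collapsed into one stand-in dict:

```python
# `common_kwargs` stands in for the feature, mlp_params, target, device, and
# regularization arguments from the README example, which do not change.
from nextrec.models.ranking import DeepFM

model = DeepFM(**common_kwargs, session_id="deepfm_with_processor")   # 0.2.2
# was: DeepFM(**common_kwargs, model_id="deepfm_with_processor")      # 0.1.11
```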