nextrec-0.1.10-py3-none-any.whl → nextrec-0.2.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. nextrec/__version__.py +1 -1
  2. nextrec/basic/activation.py +1 -2
  3. nextrec/basic/callback.py +1 -2
  4. nextrec/basic/features.py +39 -8
  5. nextrec/basic/layers.py +1 -2
  6. nextrec/basic/loggers.py +15 -10
  7. nextrec/basic/metrics.py +1 -2
  8. nextrec/basic/model.py +87 -84
  9. nextrec/basic/session.py +150 -0
  10. nextrec/data/__init__.py +13 -2
  11. nextrec/data/data_utils.py +74 -22
  12. nextrec/data/dataloader.py +513 -0
  13. nextrec/data/preprocessor.py +494 -134
  14. nextrec/loss/listwise.py +6 -0
  15. nextrec/loss/loss_utils.py +1 -2
  16. nextrec/loss/match_losses.py +4 -5
  17. nextrec/loss/pairwise.py +6 -0
  18. nextrec/loss/pointwise.py +6 -0
  19. nextrec/models/match/dssm.py +2 -2
  20. nextrec/models/match/dssm_v2.py +2 -2
  21. nextrec/models/match/mind.py +2 -2
  22. nextrec/models/match/sdm.py +2 -2
  23. nextrec/models/match/youtube_dnn.py +2 -2
  24. nextrec/models/multi_task/esmm.py +3 -3
  25. nextrec/models/multi_task/mmoe.py +3 -3
  26. nextrec/models/multi_task/ple.py +3 -3
  27. nextrec/models/multi_task/share_bottom.py +3 -3
  28. nextrec/models/ranking/afm.py +2 -3
  29. nextrec/models/ranking/autoint.py +3 -3
  30. nextrec/models/ranking/dcn.py +3 -3
  31. nextrec/models/ranking/deepfm.py +2 -3
  32. nextrec/models/ranking/dien.py +3 -3
  33. nextrec/models/ranking/din.py +3 -3
  34. nextrec/models/ranking/fibinet.py +3 -3
  35. nextrec/models/ranking/fm.py +3 -3
  36. nextrec/models/ranking/masknet.py +3 -3
  37. nextrec/models/ranking/pnn.py +3 -3
  38. nextrec/models/ranking/widedeep.py +3 -3
  39. nextrec/models/ranking/xdeepfm.py +3 -3
  40. nextrec/utils/__init__.py +4 -8
  41. nextrec/utils/embedding.py +2 -4
  42. nextrec/utils/initializer.py +1 -2
  43. nextrec/utils/optimizer.py +1 -2
  44. {nextrec-0.1.10.dist-info → nextrec-0.2.1.dist-info}/METADATA +4 -5
  45. nextrec-0.2.1.dist-info/RECORD +54 -0
  46. nextrec/basic/dataloader.py +0 -447
  47. nextrec/utils/common.py +0 -14
  48. nextrec-0.1.10.dist-info/RECORD +0 -51
  49. {nextrec-0.1.10.dist-info → nextrec-0.2.1.dist-info}/WHEEL +0 -0
  50. {nextrec-0.1.10.dist-info → nextrec-0.2.1.dist-info}/licenses/LICENSE +0 -0

nextrec/loss/listwise.py ADDED
@@ -0,0 +1,6 @@
+ """
+ Loss functions for listwise tasks
+
+ Date: create on 22/11/2025
+ Author: Yang Zhou,zyaztec@gmail.com
+ """

nextrec/loss/loss_utils.py CHANGED
@@ -2,8 +2,7 @@
  Loss utilities for NextRec

  Date: create on 09/11/2025
- Author:
- Yang Zhou,zyaztec@gmail.com
+ Author: Yang Zhou,zyaztec@gmail.com
  """
  import torch
  import torch.nn as nn

nextrec/loss/match_losses.py CHANGED
@@ -2,8 +2,7 @@
  Loss functions for matching tasks

  Date: create on 13/11/2025
- Author:
- Yang Zhou,zyaztec@gmail.com
+ Author: Yang Zhou,zyaztec@gmail.com
  """

  import torch
@@ -222,9 +221,9 @@ class ListMLELoss(nn.Module):

  class ApproxNDCGLoss(nn.Module):
  """
- Approximate NDCG loss for learning to rank
- Reference: Qin et al. A General Approximation Framework for Direct Optimization of
- Information Retrieval Measures (Information Retrieval 2010)
+ Approximate NDCG loss for learning to rank.
+ Reference: Qin et al. "A General Approximation Framework for Direct Optimization of
+ Information Retrieval Measures" (Information Retrieval 2010).
  """
  def __init__(self, temperature: float = 1.0, reduction: str = 'mean'):
  super(ApproxNDCGLoss, self).__init__()
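
A note on the ApproxNDCGLoss docstring above: the Qin et al. framework makes NDCG differentiable by replacing each item's hard rank with a smooth, sigmoid-based rank. The sketch below illustrates that formulation under the same constructor signature shown in the hunk; it is a minimal reading of the paper, with hypothetical class and variable names, not a copy of nextrec's implementation.

import torch
import torch.nn as nn

class ApproxNDCGSketch(nn.Module):
    """Illustrative smooth-rank ApproxNDCG (hypothetical, not nextrec's code)."""

    def __init__(self, temperature: float = 1.0, reduction: str = 'mean'):
        super().__init__()
        self.temperature = temperature
        self.reduction = reduction

    def forward(self, scores: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:
        # scores, labels: (batch_size, list_size)
        labels = labels.to(scores.dtype)
        # Smooth rank of item i: 1 + sum_{j != i} sigmoid((s_j - s_i) / T).
        diff = (scores.unsqueeze(2) - scores.unsqueeze(1)) / self.temperature
        ranks = 1.0 + torch.sigmoid(-diff).sum(dim=2) - 0.5  # drop the j == i term, sigmoid(0) = 0.5
        gains = torch.pow(2.0, labels) - 1.0
        dcg = (gains / torch.log2(ranks + 1.0)).sum(dim=1)
        # Ideal DCG ranks items by their true gains.
        ideal_gains, _ = torch.sort(gains, dim=1, descending=True)
        positions = torch.arange(1, labels.size(1) + 1, dtype=scores.dtype, device=scores.device)
        idcg = (ideal_gains / torch.log2(positions + 1.0)).sum(dim=1).clamp(min=1e-10)
        loss = 1.0 - dcg / idcg
        return loss.mean() if self.reduction == 'mean' else loss.sum()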

nextrec/loss/pairwise.py ADDED
@@ -0,0 +1,6 @@
+ """
+ Loss functions for pairwise tasks
+
+ Date: create on 22/11/2025
+ Author: Yang Zhou,zyaztec@gmail.com
+ """

nextrec/loss/pointwise.py ADDED
@@ -0,0 +1,6 @@
+ """
+ Loss functions for pointwise tasks
+
+ Date: create on 22/11/2025
+ Author: Yang Zhou,zyaztec@gmail.com
+ """

nextrec/models/match/dssm.py CHANGED
@@ -48,7 +48,7 @@ class DSSM(BaseMatchModel):
  embedding_l2_reg: float = 0.0,
  dense_l2_reg: float = 0.0,
  early_stop_patience: int = 20,
- model_id: str = 'dssm'):
+ **kwargs):

  super(DSSM, self).__init__(
  user_dense_features=user_dense_features,
@@ -67,7 +67,7 @@ class DSSM(BaseMatchModel):
  embedding_l2_reg=embedding_l2_reg,
  dense_l2_reg=dense_l2_reg,
  early_stop_patience=early_stop_patience,
- model_id=model_id
+ **kwargs
  )

  self.embedding_dim = embedding_dim
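
The model_id parameter removed here disappears from every model constructor in this release: extra keyword arguments now flow through **kwargs to the base class, and the METADATA hunk near the end of this diff shows the identifier resurfacing as session_id. A minimal sketch of the forwarding pattern, with hypothetical base-class internals:

import torch.nn as nn

class BaseMatchModelSketch(nn.Module):
    # Hypothetical stand-in for nextrec's BaseMatchModel: the base class now
    # owns the run identifier, so subclasses no longer declare it themselves.
    def __init__(self, early_stop_patience: int = 20, session_id: str = 'baseline', **kwargs):
        super().__init__()
        self.session_id = session_id

class DSSMSketch(BaseMatchModelSketch):
    def __init__(self, embedding_dim: int = 8, **kwargs):
        super().__init__(**kwargs)  # session_id (and any future options) pass straight through
        self.embedding_dim = embedding_dim

model = DSSMSketch(embedding_dim=16, session_id='dssm_demo')
print(model.session_id)  # dssm_demo

The same two-line substitution (drop model_id, accept and forward **kwargs) repeats in each model hunk below.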

nextrec/models/match/dssm_v2.py CHANGED
@@ -44,7 +44,7 @@ class DSSM_v2(BaseMatchModel):
  embedding_l2_reg: float = 0.0,
  dense_l2_reg: float = 0.0,
  early_stop_patience: int = 20,
- model_id: str = 'dssm_v2'):
+ **kwargs):

  super(DSSM_v2, self).__init__(
  user_dense_features=user_dense_features,
@@ -63,7 +63,7 @@ class DSSM_v2(BaseMatchModel):
  embedding_l2_reg=embedding_l2_reg,
  dense_l2_reg=dense_l2_reg,
  early_stop_patience=early_stop_patience,
- model_id=model_id
+ **kwargs
  )

  self.embedding_dim = embedding_dim

nextrec/models/match/mind.py CHANGED
@@ -51,7 +51,7 @@ class MIND(BaseMatchModel):
  embedding_l2_reg: float = 0.0,
  dense_l2_reg: float = 0.0,
  early_stop_patience: int = 20,
- model_id: str = 'mind'):
+ **kwargs):

  super(MIND, self).__init__(
  user_dense_features=user_dense_features,
@@ -70,7 +70,7 @@ class MIND(BaseMatchModel):
  embedding_l2_reg=embedding_l2_reg,
  dense_l2_reg=dense_l2_reg,
  early_stop_patience=early_stop_patience,
- model_id=model_id
+ **kwargs
  )

  self.embedding_dim = embedding_dim

nextrec/models/match/sdm.py CHANGED
@@ -52,7 +52,7 @@ class SDM(BaseMatchModel):
  embedding_l2_reg: float = 0.0,
  dense_l2_reg: float = 0.0,
  early_stop_patience: int = 20,
- model_id: str = 'sdm'):
+ **kwargs):

  super(SDM, self).__init__(
  user_dense_features=user_dense_features,
@@ -71,7 +71,7 @@ class SDM(BaseMatchModel):
  embedding_l2_reg=embedding_l2_reg,
  dense_l2_reg=dense_l2_reg,
  early_stop_patience=early_stop_patience,
- model_id=model_id
+ **kwargs
  )

  self.embedding_dim = embedding_dim

nextrec/models/match/youtube_dnn.py CHANGED
@@ -50,7 +50,7 @@ class YoutubeDNN(BaseMatchModel):
  embedding_l2_reg: float = 0.0,
  dense_l2_reg: float = 0.0,
  early_stop_patience: int = 20,
- model_id: str = 'youtube_dnn'):
+ **kwargs):

  super(YoutubeDNN, self).__init__(
  user_dense_features=user_dense_features,
@@ -69,7 +69,7 @@ class YoutubeDNN(BaseMatchModel):
  embedding_l2_reg=embedding_l2_reg,
  dense_l2_reg=dense_l2_reg,
  early_stop_patience=early_stop_patience,
- model_id=model_id
+ **kwargs
  )

  self.embedding_dim = embedding_dim

nextrec/models/multi_task/esmm.py CHANGED
@@ -47,11 +47,11 @@ class ESMM(BaseModel):
  optimizer_params: dict = {},
  loss: str | nn.Module | list[str | nn.Module] | None = "bce",
  device: str = 'cpu',
- model_id: str = "baseline",
  embedding_l1_reg=1e-6,
  dense_l1_reg=1e-5,
  embedding_l2_reg=1e-5,
- dense_l2_reg=1e-4):
+ dense_l2_reg=1e-4,
+ **kwargs):

  # ESMM requires exactly 2 targets: ctr and ctcvr
  if len(target) != 2:
@@ -69,7 +69,7 @@ class ESMM(BaseModel):
  embedding_l2_reg=embedding_l2_reg,
  dense_l2_reg=dense_l2_reg,
  early_stop_patience=20,
- model_id=model_id
+ **kwargs
  )

  self.loss = loss

nextrec/models/multi_task/mmoe.py CHANGED
@@ -45,11 +45,11 @@ class MMOE(BaseModel):
  optimizer_params: dict = {},
  loss: str | nn.Module | list[str | nn.Module] | None = "bce",
  device: str = 'cpu',
- model_id: str = "baseline",
  embedding_l1_reg=1e-6,
  dense_l1_reg=1e-5,
  embedding_l2_reg=1e-5,
- dense_l2_reg=1e-4):
+ dense_l2_reg=1e-4,
+ **kwargs):

  super(MMOE, self).__init__(
  dense_features=dense_features,
@@ -63,7 +63,7 @@ class MMOE(BaseModel):
  embedding_l2_reg=embedding_l2_reg,
  dense_l2_reg=dense_l2_reg,
  early_stop_patience=20,
- model_id=model_id
+ **kwargs
  )

  self.loss = loss

nextrec/models/multi_task/ple.py CHANGED
@@ -48,11 +48,11 @@ class PLE(BaseModel):
  optimizer_params: dict = {},
  loss: str | nn.Module | list[str | nn.Module] | None = "bce",
  device: str = 'cpu',
- model_id: str = "baseline",
  embedding_l1_reg=1e-6,
  dense_l1_reg=1e-5,
  embedding_l2_reg=1e-5,
- dense_l2_reg=1e-4):
+ dense_l2_reg=1e-4,
+ **kwargs):

  super(PLE, self).__init__(
  dense_features=dense_features,
@@ -66,7 +66,7 @@ class PLE(BaseModel):
  embedding_l2_reg=embedding_l2_reg,
  dense_l2_reg=dense_l2_reg,
  early_stop_patience=20,
- model_id=model_id
+ **kwargs
  )

  self.loss = loss

nextrec/models/multi_task/share_bottom.py CHANGED
@@ -36,11 +36,11 @@ class ShareBottom(BaseModel):
  optimizer_params: dict = {},
  loss: str | nn.Module | list[str | nn.Module] | None = "bce",
  device: str = 'cpu',
- model_id: str = "baseline",
  embedding_l1_reg=1e-6,
  dense_l1_reg=1e-5,
  embedding_l2_reg=1e-5,
- dense_l2_reg=1e-4):
+ dense_l2_reg=1e-4,
+ **kwargs):

  super(ShareBottom, self).__init__(
  dense_features=dense_features,
@@ -54,7 +54,7 @@ class ShareBottom(BaseModel):
  embedding_l2_reg=embedding_l2_reg,
  dense_l2_reg=dense_l2_reg,
  early_stop_patience=20,
- model_id=model_id
+ **kwargs
  )

  self.loss = loss

nextrec/models/ranking/afm.py CHANGED
@@ -35,11 +35,10 @@ class AFM(BaseModel):
  optimizer_params: dict = {},
  loss: str | nn.Module | None = "bce",
  device: str = 'cpu',
- model_id: str = "baseline",
  embedding_l1_reg=1e-6,
  dense_l1_reg=1e-5,
  embedding_l2_reg=1e-5,
- dense_l2_reg=1e-4):
+ dense_l2_reg=1e-4, **kwargs):

  super(AFM, self).__init__(
  dense_features=dense_features,
@@ -53,7 +52,7 @@ class AFM(BaseModel):
  embedding_l2_reg=embedding_l2_reg,
  dense_l2_reg=dense_l2_reg,
  early_stop_patience=20,
- model_id=model_id
+ **kwargs
  )

  self.loss = loss

nextrec/models/ranking/autoint.py CHANGED
@@ -40,11 +40,11 @@ class AutoInt(BaseModel):
  optimizer_params: dict = {},
  loss: str | nn.Module | None = "bce",
  device: str = 'cpu',
- model_id: str = "baseline",
  embedding_l1_reg=1e-6,
  dense_l1_reg=1e-5,
  embedding_l2_reg=1e-5,
- dense_l2_reg=1e-4):
+ dense_l2_reg=1e-4,
+ **kwargs):

  super(AutoInt, self).__init__(
  dense_features=dense_features,
@@ -58,7 +58,7 @@ class AutoInt(BaseModel):
  embedding_l2_reg=embedding_l2_reg,
  dense_l2_reg=dense_l2_reg,
  early_stop_patience=20,
- model_id=model_id
+ **kwargs
  )

  self.loss = loss

nextrec/models/ranking/dcn.py CHANGED
@@ -36,11 +36,11 @@ class DCN(BaseModel):
  optimizer_params: dict = {},
  loss: str | nn.Module | None = "bce",
  device: str = 'cpu',
- model_id: str = "baseline",
  embedding_l1_reg=1e-6,
  dense_l1_reg=1e-5,
  embedding_l2_reg=1e-5,
- dense_l2_reg=1e-4):
+ dense_l2_reg=1e-4,
+ **kwargs):

  super(DCN, self).__init__(
  dense_features=dense_features,
@@ -54,7 +54,7 @@ class DCN(BaseModel):
  embedding_l2_reg=embedding_l2_reg,
  dense_l2_reg=dense_l2_reg,
  early_stop_patience=20,
- model_id=model_id
+ **kwargs
  )

  self.loss = loss

nextrec/models/ranking/deepfm.py CHANGED
@@ -32,11 +32,10 @@ class DeepFM(BaseModel):
  optimizer_params: dict = {},
  loss: str | nn.Module | None = "bce",
  device: str = 'cpu',
- model_id: str = "baseline",
  embedding_l1_reg=1e-6,
  dense_l1_reg=1e-5,
  embedding_l2_reg=1e-5,
- dense_l2_reg=1e-4):
+ dense_l2_reg=1e-4, **kwargs):

  super(DeepFM, self).__init__(
  dense_features=dense_features,
@@ -50,7 +49,7 @@ class DeepFM(BaseModel):
  embedding_l2_reg=embedding_l2_reg,
  dense_l2_reg=dense_l2_reg,
  early_stop_patience=20,
- model_id=model_id
+ **kwargs
  )

  self.loss = loss

nextrec/models/ranking/dien.py CHANGED
@@ -39,11 +39,11 @@ class DIEN(BaseModel):
  optimizer_params: dict = {},
  loss: str | nn.Module | None = "bce",
  device: str = 'cpu',
- model_id: str = "baseline",
  embedding_l1_reg=1e-6,
  dense_l1_reg=1e-5,
  embedding_l2_reg=1e-5,
- dense_l2_reg=1e-4):
+ dense_l2_reg=1e-4,
+ **kwargs):

  super(DIEN, self).__init__(
  dense_features=dense_features,
@@ -57,7 +57,7 @@ class DIEN(BaseModel):
  embedding_l2_reg=embedding_l2_reg,
  dense_l2_reg=dense_l2_reg,
  early_stop_patience=20,
- model_id=model_id
+ **kwargs
  )

  self.loss = loss

nextrec/models/ranking/din.py CHANGED
@@ -38,11 +38,11 @@ class DIN(BaseModel):
  optimizer_params: dict = {},
  loss: str | nn.Module | None = "bce",
  device: str = 'cpu',
- model_id: str = "baseline",
  embedding_l1_reg=1e-6,
  dense_l1_reg=1e-5,
  embedding_l2_reg=1e-5,
- dense_l2_reg=1e-4):
+ dense_l2_reg=1e-4,
+ **kwargs):

  super(DIN, self).__init__(
  dense_features=dense_features,
@@ -56,7 +56,7 @@ class DIN(BaseModel):
  embedding_l2_reg=embedding_l2_reg,
  dense_l2_reg=dense_l2_reg,
  early_stop_patience=20,
- model_id=model_id
+ **kwargs
  )

  self.loss = loss

nextrec/models/ranking/fibinet.py CHANGED
@@ -43,11 +43,11 @@ class FiBiNET(BaseModel):
  optimizer_params: dict = {},
  loss: str | nn.Module | None = "bce",
  device: str = 'cpu',
- model_id: str = "baseline",
  embedding_l1_reg=1e-6,
  dense_l1_reg=1e-5,
  embedding_l2_reg=1e-5,
- dense_l2_reg=1e-4):
+ dense_l2_reg=1e-4,
+ **kwargs):

  super(FiBiNET, self).__init__(
  dense_features=dense_features,
@@ -61,7 +61,7 @@ class FiBiNET(BaseModel):
  embedding_l2_reg=embedding_l2_reg,
  dense_l2_reg=dense_l2_reg,
  early_stop_patience=20,
- model_id=model_id
+ **kwargs
  )

  self.loss = loss

nextrec/models/ranking/fm.py CHANGED
@@ -31,11 +31,11 @@ class FM(BaseModel):
  optimizer_params: dict = {},
  loss: str | nn.Module | None = "bce",
  device: str = 'cpu',
- model_id: str = "baseline",
  embedding_l1_reg=1e-6,
  dense_l1_reg=1e-5,
  embedding_l2_reg=1e-5,
- dense_l2_reg=1e-4):
+ dense_l2_reg=1e-4,
+ **kwargs):

  super(FM, self).__init__(
  dense_features=dense_features,
@@ -49,7 +49,7 @@ class FM(BaseModel):
  embedding_l2_reg=embedding_l2_reg,
  dense_l2_reg=dense_l2_reg,
  early_stop_patience=20,
- model_id=model_id
+ **kwargs
  )

  self.loss = loss

nextrec/models/ranking/masknet.py CHANGED
@@ -37,11 +37,11 @@ class MaskNet(BaseModel):
  optimizer_params: dict = {},
  loss: str | nn.Module | None = "bce",
  device: str = 'cpu',
- model_id: str = "baseline",
  embedding_l1_reg=1e-6,
  dense_l1_reg=1e-5,
  embedding_l2_reg=1e-5,
- dense_l2_reg=1e-4):
+ dense_l2_reg=1e-4,
+ **kwargs):

  super(MaskNet, self).__init__(
  dense_features=dense_features,
@@ -55,7 +55,7 @@ class MaskNet(BaseModel):
  embedding_l2_reg=embedding_l2_reg,
  dense_l2_reg=dense_l2_reg,
  early_stop_patience=20,
- model_id=model_id
+ **kwargs
  )

  self.loss = loss

nextrec/models/ranking/pnn.py CHANGED
@@ -35,11 +35,11 @@ class PNN(BaseModel):
  optimizer_params: dict = {},
  loss: str | nn.Module | None = "bce",
  device: str = 'cpu',
- model_id: str = "baseline",
  embedding_l1_reg=1e-6,
  dense_l1_reg=1e-5,
  embedding_l2_reg=1e-5,
- dense_l2_reg=1e-4):
+ dense_l2_reg=1e-4,
+ **kwargs):

  super(PNN, self).__init__(
  dense_features=dense_features,
@@ -53,7 +53,7 @@ class PNN(BaseModel):
  embedding_l2_reg=embedding_l2_reg,
  dense_l2_reg=dense_l2_reg,
  early_stop_patience=20,
- model_id=model_id
+ **kwargs
  )

  self.loss = loss

nextrec/models/ranking/widedeep.py CHANGED
@@ -35,11 +35,11 @@ class WideDeep(BaseModel):
  optimizer_params: dict = {},
  loss: str | nn.Module | None = "bce",
  device: str = 'cpu',
- model_id: str = "baseline",
  embedding_l1_reg=1e-6,
  dense_l1_reg=1e-5,
  embedding_l2_reg=1e-5,
- dense_l2_reg=1e-4):
+ dense_l2_reg=1e-4,
+ **kwargs):

  super(WideDeep, self).__init__(
  dense_features=dense_features,
@@ -53,7 +53,7 @@ class WideDeep(BaseModel):
  embedding_l2_reg=embedding_l2_reg,
  dense_l2_reg=dense_l2_reg,
  early_stop_patience=20,
- model_id=model_id
+ **kwargs
  )

  self.loss = loss

nextrec/models/ranking/xdeepfm.py CHANGED
@@ -38,11 +38,11 @@ class xDeepFM(BaseModel):
  optimizer_params: dict = {},
  loss: str | nn.Module | None = "bce",
  device: str = 'cpu',
- model_id: str = "baseline",
  embedding_l1_reg=1e-6,
  dense_l1_reg=1e-5,
  embedding_l2_reg=1e-5,
- dense_l2_reg=1e-4):
+ dense_l2_reg=1e-4,
+ **kwargs):

  super(xDeepFM, self).__init__(
  dense_features=dense_features,
@@ -56,7 +56,7 @@ class xDeepFM(BaseModel):
  embedding_l2_reg=embedding_l2_reg,
  dense_l2_reg=dense_l2_reg,
  early_stop_patience=20,
- model_id=model_id
+ **kwargs
  )

  self.loss = loss
nextrec/utils/__init__.py CHANGED
@@ -1,18 +1,14 @@
- from nextrec.utils.optimizer import get_optimizer_fn, get_scheduler_fn
- from nextrec.utils.initializer import get_initializer_fn
- from nextrec.utils.embedding import get_auto_embedding_dim
- from nextrec.utils.common import get_task_type
-
- from nextrec.utils import optimizer, initializer, embedding, common
+ from .optimizer import get_optimizer_fn, get_scheduler_fn
+ from .initializer import get_initializer_fn
+ from .embedding import get_auto_embedding_dim
+ from . import optimizer, initializer, embedding

  __all__ = [
  'get_optimizer_fn',
  'get_scheduler_fn',
  'get_initializer_fn',
  'get_auto_embedding_dim',
- 'get_task_type',
  'optimizer',
  'initializer',
  'embedding',
- 'common',
  ]
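
With get_task_type and the common module gone (nextrec/utils/common.py is deleted outright; see file 47 in the list above), the public surface of nextrec.utils in 0.2.1 reduces to the four helpers and three submodules in __all__. A quick smoke test, assuming a 0.2.1 install:

from nextrec.utils import (
    get_optimizer_fn,
    get_scheduler_fn,
    get_initializer_fn,
    get_auto_embedding_dim,
)
from nextrec.utils import optimizer, initializer, embedding  # re-exported submodules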

nextrec/utils/embedding.py CHANGED
@@ -2,8 +2,7 @@
  Embedding utilities for NextRec

  Date: create on 13/11/2025
- Author:
- Yang Zhou, zyaztec@gmail.com
+ Author: Yang Zhou, zyaztec@gmail.com
  """

  import numpy as np
@@ -13,7 +12,6 @@ def get_auto_embedding_dim(num_classes: int) -> int:
  """
  Calculate the dim of embedding vector according to number of classes in the category.
  Formula: emb_dim = [6 * (num_classes)^(1/4)]
- Reference:
- Deep & Cross Network for Ad Click Predictions.(ADKDD'17)
+ Reference: Deep & Cross Network for Ad Click Predictions.(ADKDD'17)
  """
  return int(np.floor(6 * np.power(num_classes, 0.25)))
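
The heuristic above is easy to sanity-check by hand: for a 10,000-class vocabulary, 6 * 10000^(1/4) = 6 * 10 = 60. For example:

from nextrec.utils.embedding import get_auto_embedding_dim

print(get_auto_embedding_dim(10_000))  # 60
print(get_auto_embedding_dim(100))     # floor(6 * 100 ** 0.25) = floor(18.97...) = 18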

nextrec/utils/initializer.py CHANGED
@@ -2,8 +2,7 @@
  Initialization utilities for NextRec

  Date: create on 13/11/2025
- Author:
- Yang Zhou, zyaztec@gmail.com
+ Author: Yang Zhou, zyaztec@gmail.com
  """

  import torch.nn as nn

nextrec/utils/optimizer.py CHANGED
@@ -2,8 +2,7 @@
  Optimizer and Scheduler utilities for NextRec

  Date: create on 13/11/2025
- Author:
- Yang Zhou, zyaztec@gmail.com
+ Author: Yang Zhou, zyaztec@gmail.com
  """

  import torch

{nextrec-0.1.10.dist-info → nextrec-0.2.1.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: nextrec
- Version: 0.1.10
+ Version: 0.2.1
  Summary: A comprehensive recommendation library with match, ranking, and multi-task learning models
  Project-URL: Homepage, https://github.com/zerolovesea/NextRec
  Project-URL: Repository, https://github.com/zerolovesea/NextRec
@@ -61,9 +61,9 @@ Description-Content-Type: text/markdown
  ![Python](https://img.shields.io/badge/Python-3.10+-blue.svg)
  ![PyTorch](https://img.shields.io/badge/PyTorch-1.10+-ee4c2c.svg)
  ![License](https://img.shields.io/badge/License-Apache%202.0-green.svg)
- ![Version](https://img.shields.io/badge/Version-0.1.0-orange.svg)
+ ![Version](https://img.shields.io/badge/Version-0.2.1-orange.svg)

- [中文版](README_zh.md)
+ English | [中文版](README_zh.md)

  **A Unified, Efficient, and Scalable Recommendation System Framework**

@@ -123,7 +123,7 @@ model = DeepFM(
  mlp_params={"dims": [256, 128], "activation": "relu", "dropout": 0.5},
  target=target,
  device='cpu',
- model_id="deepfm_with_processor",
+ session_id="deepfm_with_processor",
  embedding_l1_reg=1e-6,
  dense_l1_reg=1e-5,
  embedding_l2_reg=1e-5,
@@ -269,7 +269,6 @@ NextRec is inspired by the following great open-source projects:
  - **torch-rechub** - A Lighting Pytorch Framework for Recommendation Models, Easy-to-use and Easy-to-extend.
  - **FuxiCTR** — Configurable and reproducible CTR prediction library
  - **RecBole** — Unified and efficient recommendation library
- - **PaddleRec** — Large-scale recommendation algorithm library

  Special thanks to all open-source contributors!