nextrec 0.4.21-py3-none-any.whl → 0.4.22-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- nextrec/__version__.py +1 -1
- nextrec/basic/activation.py +1 -1
- nextrec/basic/heads.py +2 -3
- nextrec/basic/model.py +54 -14
- nextrec/loss/__init__.py +0 -4
- nextrec/loss/grad_norm.py +3 -3
- nextrec/utils/__init__.py +5 -1
- nextrec/{loss/loss_utils.py → utils/loss.py} +17 -7
- nextrec/utils/types.py +27 -23
- {nextrec-0.4.21.dist-info → nextrec-0.4.22.dist-info}/METADATA +5 -5
- {nextrec-0.4.21.dist-info → nextrec-0.4.22.dist-info}/RECORD +14 -14
- {nextrec-0.4.21.dist-info → nextrec-0.4.22.dist-info}/WHEEL +0 -0
- {nextrec-0.4.21.dist-info → nextrec-0.4.22.dist-info}/entry_points.txt +0 -0
- {nextrec-0.4.21.dist-info → nextrec-0.4.22.dist-info}/licenses/LICENSE +0 -0
nextrec/__version__.py
CHANGED
```diff
@@ -1 +1 @@
-__version__ = "0.4.21"
+__version__ = "0.4.22"
```
nextrec/basic/activation.py
CHANGED
nextrec/basic/heads.py
CHANGED
```diff
@@ -15,6 +15,7 @@ import torch.nn as nn
 import torch.nn.functional as F
 
 from nextrec.basic.layers import PredictionLayer
+from nextrec.utils.types import TaskTypeName
 
 
 class TaskHead(nn.Module):
@@ -27,9 +28,7 @@ class TaskHead(nn.Module):
 
     def __init__(
         self,
-        task_type: (
-            Literal["binary", "regression"] | list[Literal["binary", "regression"]]
-        ) = "binary",
+        task_type: TaskTypeName | list[TaskTypeName] = "binary",
         task_dims: int | list[int] | None = None,
         use_bias: bool = True,
         return_logits: bool = False,
```
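For reference, `TaskTypeName` is the alias added to `nextrec/utils/types.py` in this release (see its diff below), so `TaskHead` now spells its accepted task types once instead of repeating the inline `Literal` union. The sketch below is illustrative only: it redefines the alias locally so the snippet runs standalone, and `check_task_types` is not part of nextrec.

```python
from __future__ import annotations

from typing import Literal, get_args

# Local copy of the alias added in nextrec/utils/types.py (illustrative only).
TaskTypeName = Literal["binary", "regression"]


def check_task_types(task_type: TaskTypeName | list[TaskTypeName]) -> list[str]:
    """Normalize a single task type or a list of them, mirroring what TaskHead accepts."""
    tasks = list(task_type) if isinstance(task_type, list) else [task_type]
    valid = set(get_args(TaskTypeName))  # {"binary", "regression"}
    for t in tasks:
        if t not in valid:
            raise ValueError(f"Unsupported task type: {t}")
    return tasks


print(check_task_types("binary"))                  # ['binary']
print(check_task_types(["binary", "regression"]))  # ['binary', 'regression']
```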
nextrec/basic/model.py
CHANGED
```diff
@@ -60,8 +60,8 @@ from nextrec.loss import (
     InfoNCELoss,
     SampledSoftmaxLoss,
     TripletLoss,
-    get_loss_fn,
 )
+from nextrec.utils.loss import get_loss_fn
 from nextrec.loss.grad_norm import get_grad_norm_shared_params
 from nextrec.utils.console import display_metrics_table, progress
 from nextrec.utils.torch_utils import (
@@ -75,7 +75,13 @@ from nextrec.utils.torch_utils import (
 )
 from nextrec.utils.config import safe_value
 from nextrec.utils.model import compute_ranking_loss
-from nextrec.utils.types import
+from nextrec.utils.types import (
+    LossName,
+    OptimizerName,
+    SchedulerName,
+    TrainingModeName,
+    TaskTypeName,
+)
 
 
 class BaseModel(SummarySet, FeatureSet, nn.Module):
@@ -94,11 +100,8 @@ class BaseModel(SummarySet, FeatureSet, nn.Module):
         sequence_features: list[SequenceFeature] | None = None,
         target: list[str] | str | None = None,
         id_columns: list[str] | str | None = None,
-        task:
-        training_mode: (
-            Literal["pointwise", "pairwise", "listwise"]
-            | list[Literal["pointwise", "pairwise", "listwise"]]
-        ) = "pointwise",
+        task: TaskTypeName | list[TaskTypeName] | None = None,
+        training_mode: TrainingModeName | list[TrainingModeName] = "pointwise",
         embedding_l1_reg: float = 0.0,
         dense_l1_reg: float = 0.0,
         embedding_l2_reg: float = 0.0,
@@ -179,8 +182,7 @@ class BaseModel(SummarySet, FeatureSet, nn.Module):
         else:
             training_modes = [training_mode] * self.nums_task
         if any(
-            mode not in {"pointwise", "pairwise", "listwise"}
-            for mode in training_modes
+            mode not in {"pointwise", "pairwise", "listwise"} for mode in training_modes
         ):
             raise ValueError(
                 "[BaseModel-init Error] training_mode must be one of {'pointwise', 'pairwise', 'listwise'}."
@@ -195,6 +197,7 @@ class BaseModel(SummarySet, FeatureSet, nn.Module):
         self.regularization_weights = []
         self.embedding_params = []
         self.loss_weight = None
+        self.ignore_label = None
 
         self.max_gradient_norm = 1.0
         self.logger_initialized = False
@@ -407,6 +410,7 @@ class BaseModel(SummarySet, FeatureSet, nn.Module):
         loss: LossName | nn.Module | list[LossName | nn.Module] | None = "bce",
         loss_params: dict | list[dict] | None = None,
         loss_weights: int | float | list[int | float] | dict | str | None = None,
+        ignore_label: int | float | None = -1,
     ):
         """
         Configure the model for training.
@@ -419,7 +423,9 @@ class BaseModel(SummarySet, FeatureSet, nn.Module):
             loss_params: Loss function parameters, or list for multi-task. e.g., {'weight': tensor([0.25, 0.75])}.
             loss_weights: Weights for each task loss, int/float for single-task or list for multi-task. e.g., 1.0, or [1.0, 0.5].
                 Use "grad_norm" or {"method": "grad_norm", ...} to enable GradNorm for multi-task loss balancing.
+            ignore_label: Label value to ignore when computing loss. Use this to skip gradients for unknown labels.
         """
+        self.ignore_label = ignore_label
         default_losses = {
             "pointwise": "bce",
             "pairwise": "bpr",
@@ -540,6 +546,7 @@ class BaseModel(SummarySet, FeatureSet, nn.Module):
             raise ValueError(
                 "[BaseModel-compute_loss Error] Ground truth labels (y_true) are required."
             )
+
         # single-task
         if self.nums_task == 1:
             if y_pred.dim() == 1:
@@ -547,13 +554,24 @@ class BaseModel(SummarySet, FeatureSet, nn.Module):
             if y_true.dim() == 1:
                 y_true = y_true.view(-1, 1)
             if y_pred.shape != y_true.shape:
-                raise ValueError(f"Shape mismatch: {y_pred.shape} vs {y_true.shape}")
-            loss_fn = self.loss_fn[0] if getattr(self, "loss_fn", None) else None
-            if loss_fn is None:
                 raise ValueError(
-                    "[BaseModel-compute_loss Error]
+                    f"[BaseModel-compute_loss Error] Shape mismatch: {y_pred.shape} vs {y_true.shape}"
                 )
+
+            loss_fn = self.loss_fn[0]
+
+            if self.ignore_label is not None:
+                valid_mask = y_true != self.ignore_label
+                if valid_mask.dim() > 1:
+                    valid_mask = valid_mask.all(dim=1)
+                if not torch.any(valid_mask):  # if no valid labels, return zero loss
+                    return y_pred.sum() * 0.0
+
+                y_pred = y_pred[valid_mask]
+                y_true = y_true[valid_mask]
+
             mode = self.training_modes[0]
+
             task_dim = (
                 self.task_dims[0] if hasattr(self, "task_dims") else y_pred.shape[1]  # type: ignore
             )
@@ -584,7 +602,25 @@ class BaseModel(SummarySet, FeatureSet, nn.Module):
         for i, (start, end) in enumerate(slices):  # type: ignore
             y_pred_i = y_pred[:, start:end]
             y_true_i = y_true[:, start:end]
+            total_count = y_true_i.shape[0]
+            # valid_count = None
+
+            # mask ignored labels
+            if self.ignore_label is not None:
+                valid_mask = y_true_i != self.ignore_label
+                if valid_mask.dim() > 1:
+                    valid_mask = valid_mask.all(dim=1)
+                if not torch.any(valid_mask):
+                    task_losses.append(y_pred_i.sum() * 0.0)
+                    continue
+                # valid_count = valid_mask.sum().to(dtype=y_true_i.dtype)
+                y_pred_i = y_pred_i[valid_mask]
+                y_true_i = y_true_i[valid_mask]
+            # else:
+            #     valid_count = y_true_i.new_tensor(float(total_count))
+
             mode = self.training_modes[i]
+
             if mode in {"pairwise", "listwise"}:
                 task_loss = compute_ranking_loss(
                     training_mode=mode,
@@ -594,7 +630,11 @@ class BaseModel(SummarySet, FeatureSet, nn.Module):
                 )
             else:
                 task_loss = self.loss_fn[i](y_pred_i, y_true_i)
+                # task_loss = normalize_task_loss(
+                #     task_loss, valid_count, total_count
+                # )  # normalize by valid samples to avoid loss scale issues
             task_losses.append(task_loss)
+
         if self.grad_norm is not None:
             if self.grad_norm_shared_params is None:
                 self.grad_norm_shared_params = get_grad_norm_shared_params(
@@ -2164,7 +2204,7 @@ class BaseMatchModel(BaseModel):
             scheduler_params: Parameters for the scheduler. e.g., {'step_size': 10, 'gamma': 0.1}.
             loss: Loss function(s) to use (name, instance, or list). e.g., 'bce'.
             loss_params: Parameters for the loss function(s). e.g., {'reduction': 'mean'}.
-            loss_weights: Weights for the loss function(s). e.g., 1.0 or [0.7, 0.3].
+            loss_weights: Weights for the loss function(s). e.g., 1.0 or [0.7, 0.3].
         """
         if self.training_mode not in self.support_training_modes:
             raise ValueError(
```
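The `ignore_label` argument added to `compile` above (default -1; pass `None` to disable) masks out rows whose label equals the sentinel before the loss is evaluated, and returns a graph-connected zero loss when a batch has no valid labels for a task. The standalone sketch below mirrors that masking logic outside the model for a single binary task; `masked_bce` is illustrative and not nextrec's actual code path, which lives in `compute_loss` as shown in the diff.

```python
import torch
import torch.nn.functional as F


def masked_bce(y_pred: torch.Tensor, y_true: torch.Tensor, ignore_label: float = -1) -> torch.Tensor:
    """Sketch of the masking added in compute_loss: drop rows labelled ignore_label."""
    valid_mask = y_true != ignore_label
    if valid_mask.dim() > 1:
        valid_mask = valid_mask.all(dim=1)
    if not torch.any(valid_mask):
        return y_pred.sum() * 0.0  # keeps the graph intact, contributes no gradient
    return F.binary_cross_entropy(y_pred[valid_mask], y_true[valid_mask])


y_pred = torch.tensor([[0.9], [0.2], [0.7]], requires_grad=True)
y_true = torch.tensor([[1.0], [-1.0], [0.0]])  # second row has an unknown label
print(masked_bce(y_pred, y_true))  # loss computed over rows 0 and 2 only
```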
nextrec/loss/__init__.py
CHANGED
```diff
@@ -6,7 +6,6 @@ from nextrec.loss.listwise import (
     SampledSoftmaxLoss,
 )
 from nextrec.loss.grad_norm import GradNormLossWeighting
-from nextrec.loss.loss_utils import VALID_TASK_TYPES, get_loss_fn, get_loss_kwargs
 from nextrec.loss.pairwise import BPRLoss, HingeLoss, TripletLoss
 from nextrec.loss.pointwise import (
     ClassBalancedFocalLoss,
@@ -34,7 +33,4 @@ __all__ = [
     # Multi-task weighting
     "GradNormLossWeighting",
     # Utilities
-    "get_loss_fn",
-    "get_loss_kwargs",
-    "VALID_TASK_TYPES",
 ]
```
nextrec/loss/grad_norm.py
CHANGED
```diff
@@ -20,9 +20,9 @@ import torch.nn.functional as F
 
 
 def get_grad_norm_shared_params(
-    model
-    shared_modules
-)
+    model,
+    shared_modules=None,
+):
     if not shared_modules:
         return [p for p in model.parameters() if p.requires_grad]
     shared_params = []
```
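With `shared_modules` now defaulting to `None`, `get_grad_norm_shared_params` can be called with just a model, in which case every trainable parameter is treated as shared for GradNorm (per the `if not shared_modules` branch above). A minimal sketch; the toy model is illustrative:

```python
import torch.nn as nn

from nextrec.loss.grad_norm import get_grad_norm_shared_params

# Any nn.Module works here; this tiny stand-in is only for illustration.
model = nn.Sequential(nn.Linear(8, 4), nn.ReLU(), nn.Linear(4, 1))

# shared_modules omitted: every requires_grad parameter is returned.
shared = get_grad_norm_shared_params(model)
print(len(shared))  # 4: weight and bias of both Linear layers
```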
nextrec/utils/__init__.py
CHANGED
```diff
@@ -6,7 +6,7 @@ Last update: 19/12/2025
 Author: Yang Zhou, zyaztec@gmail.com
 """
 
-from . import console, data, embedding, torch_utils
+from . import console, data, embedding, loss, torch_utils
 from .config import (
     build_feature_objects,
     build_model_instance,
@@ -38,6 +38,7 @@ from .data import (
 from .embedding import get_auto_embedding_dim
 from .feature import to_list
 from .model import compute_pair_scores, get_mlp_output_dim, merge_features
+from .loss import normalize_task_loss
 from .torch_utils import (
     add_distributed_sampler,
     get_device,
@@ -81,6 +82,8 @@ __all__ = [
     "merge_features",
     "get_mlp_output_dim",
     "compute_pair_scores",
+    # Loss utilities
+    "normalize_task_loss",
     # Feature utilities
     "to_list",
     # Config utilities
@@ -101,6 +104,7 @@ __all__ = [
     "console",
     "data",
     "embedding",
+    "loss",
     "torch_utils",
     # Type aliases
     "OptimizerName",
```
nextrec/{loss/loss_utils.py → utils/loss.py}
CHANGED
```diff
@@ -1,11 +1,13 @@
 """
 Loss utilities for NextRec.
 
-Date: create on
-Checkpoint: edit on 19/12/2025
+Date: create on 28/12/2025
 Author: Yang Zhou, zyaztec@gmail.com
 """
 
+from __future__ import annotations
+
+import torch
 import torch.nn as nn
 
 from nextrec.loss.listwise import (
@@ -19,11 +21,19 @@ from nextrec.loss.pairwise import BPRLoss, HingeLoss, TripletLoss
 from nextrec.loss.pointwise import ClassBalancedFocalLoss, FocalLoss, WeightedBCELoss
 from nextrec.utils.types import LossName
 
-
-
-
-
-
+
+def normalize_task_loss(
+    task_loss,
+    valid_count,
+    total_count,
+    eps=1e-8,
+) -> torch.Tensor:
+    if not torch.is_tensor(valid_count):
+        valid_count = torch.tensor(float(valid_count), device=task_loss.device)
+    if not torch.is_tensor(total_count):
+        total_count = torch.tensor(float(total_count), device=task_loss.device)
+    scale = valid_count.to(task_loss.dtype) / (total_count.to(task_loss.dtype) + eps)
+    return task_loss * scale
 
 
 def build_cb_focal(kw):
```
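`normalize_task_loss` is re-exported from `nextrec.utils` (see the `__init__.py` diff above); in `model.py` it is currently only referenced in commented-out code. A standalone usage sketch with illustrative counts:

```python
import torch

from nextrec.utils import normalize_task_loss

# Suppose a batch of 8 rows had only 6 valid labels for a task: scaling the
# per-valid-sample loss by 6/8 keeps task losses on a comparable scale.
task_loss = torch.tensor(0.4)
scaled = normalize_task_loss(task_loss, valid_count=6, total_count=8)
print(scaled)  # tensor(0.3000), up to the eps term
```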
nextrec/utils/types.py
CHANGED
```diff
@@ -34,26 +34,30 @@ LossName = Literal[
 ]
 
 ActivationName = Literal[
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    "dice",
+    "relu",
+    "relu6",
+    "elu",
+    "selu",
+    "leaky_relu",
+    "prelu",
+    "gelu",
+    "sigmoid",
+    "tanh",
+    "softplus",
+    "softsign",
+    "hardswish",
+    "mish",
+    "silu",
+    "swish",
+    "hardsigmoid",
+    "tanhshrink",
+    "softshrink",
+    "none",
+    "linear",
+    "identity",
+]
+
+TrainingModeName = Literal["pointwise", "pairwise", "listwise"]
+
+TaskTypeName = Literal["binary", "regression"]
```
{nextrec-0.4.21.dist-info → nextrec-0.4.22.dist-info}/METADATA
CHANGED
```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: nextrec
-Version: 0.4.21
+Version: 0.4.22
 Summary: A comprehensive recommendation library with match, ranking, and multi-task learning models
 Project-URL: Homepage, https://github.com/zerolovesea/NextRec
 Project-URL: Repository, https://github.com/zerolovesea/NextRec
@@ -65,11 +65,11 @@ Description-Content-Type: text/markdown
 
 <div align="center">
 
-[](https://pypistats.org/packages/nextrec)
 
 
 
-
 [](https://deepwiki.com/zerolovesea/NextRec)
 
 中文文档 | [English Version](README_en.md)
@@ -247,11 +247,11 @@ nextrec --mode=predict --predict_config=path/to/predict_config.yaml
 
 预测结果固定保存到 `{checkpoint_path}/predictions/{name}.{save_data_format}`。
 
-> 截止当前版本0.4.21,NextRec CLI支持单机训练,分布式训练相关功能尚在开发中。
+> 截止当前版本0.4.22,NextRec CLI支持单机训练,分布式训练相关功能尚在开发中。
 
 ## 兼容平台
 
-当前最新版本为0.4.21,所有模型和测试代码均已在以下平台通过验证,如果开发者在使用中遇到兼容问题,请在issue区提出错误报告及系统版本:
+当前最新版本为0.4.22,所有模型和测试代码均已在以下平台通过验证,如果开发者在使用中遇到兼容问题,请在issue区提出错误报告及系统版本:
 
 | 平台 | 配置 |
 |------|------|
```
{nextrec-0.4.21.dist-info → nextrec-0.4.22.dist-info}/RECORD
CHANGED
```diff
@@ -1,15 +1,15 @@
 nextrec/__init__.py,sha256=_M3oUqyuvQ5k8Th_3wId6hQ_caclh7M5ad51XN09m98,235
-nextrec/__version__.py,sha256=
+nextrec/__version__.py,sha256=dTG-jzL6gT1bOj-aXPys00hnioYMNGLuF9EnbsEF7HI,23
 nextrec/cli.py,sha256=Vm1XCFVw1vFh9NFw3PYZ_fYbh07tf45fl3RtPycooUI,24317
 nextrec/basic/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-nextrec/basic/activation.py,sha256=
+nextrec/basic/activation.py,sha256=uekcJsOy8SiT0_NaDO2VNSStyYFzVikDFVLDk-VrjwQ,2949
 nextrec/basic/callback.py,sha256=7geza5iMMlMojlrIKH5A7nzvCe4IYwgUaMRh_xpblWk,12585
 nextrec/basic/features.py,sha256=zLijBNkKwCXv9TKxSWwvmt7aVfWn2D5JvfwukeIRqec,9174
-nextrec/basic/heads.py,sha256=
+nextrec/basic/heads.py,sha256=BshykLxD41KxKuZaBxf4Fmy1Mc52b3ioJliN1BVaGlk,3374
 nextrec/basic/layers.py,sha256=74RZiyYgiY9YFb2hWWNEBdWjvx2bXzCF3WtJJeSDtXQ,37857
 nextrec/basic/loggers.py,sha256=KxTPVHtkebAbpxZIYZ4aqncZCu-dccpKtIxmi2bVs6o,13160
 nextrec/basic/metrics.py,sha256=1r6efTc9TpARNBt5X9ISoppTZflej6EdFkjPYHV-YZI,23162
-nextrec/basic/model.py,sha256=
+nextrec/basic/model.py,sha256=dta6yBqsMSu7RMyY7CIyYvJwberv53EMFCkXYIhLUuw,105211
 nextrec/basic/session.py,sha256=mrIsjRJhmvcAfoO1pXX-KB3SK5CCgz89wH8XDoAiGEI,4475
 nextrec/basic/summary.py,sha256=I6vNc-W_mVo7EogsFUTXf20bWYMVnTab60Zs6wSxsdc,14406
 nextrec/data/__init__.py,sha256=YZQjpty1pDCM7q_YNmiA2sa5kbujUw26ObLHWjMPjKY,1194
@@ -18,10 +18,9 @@ nextrec/data/data_processing.py,sha256=ZDZMSTBvxjPppl872is4M49o4WAkZXw2vUFOsNr0q
 nextrec/data/data_utils.py,sha256=0Ls1cnG9lBz0ovtyedw5vwp7WegGK_iF-F8e_3DEddo,880
 nextrec/data/dataloader.py,sha256=43eTLqhWKcJdrFiGzlrz8zIgLAQsylRQ_DOklPNmKr4,18993
 nextrec/data/preprocessor.py,sha256=pilzqbluAn1QykeBVPxvnQcbRUuZr3aX9hCQqey--Ks,47245
-nextrec/loss/__init__.py,sha256=
-nextrec/loss/grad_norm.py,sha256=
+nextrec/loss/__init__.py,sha256=rualGsY-IBvmM52q9eOBk0MyKcMkpkazcscOeDXi_SM,774
+nextrec/loss/grad_norm.py,sha256=YoE_XSIN1HOUcNq1dpfkIlWtMaB5Pu-SEWDaNgtRw1M,8316
 nextrec/loss/listwise.py,sha256=UT9vJCOTOQLogVwaeTV7Z5uxIYnngGdxk-p9e97MGkU,5744
-nextrec/loss/loss_utils.py,sha256=Bl3PJ-AQrTDlV_uGJV4M6XCgmf8X2Z0h4nAP9o40ngU,4168
 nextrec/loss/pairwise.py,sha256=X9yg-8pcPt2IWU0AiUhWAt3_4W_3wIF0uSdDYTdoPFY,3398
 nextrec/loss/pointwise.py,sha256=o9J3OznY0hlbDsUXqn3k-BBzYiuUH5dopz8QBFqS_kQ,7343
 nextrec/models/generative/__init__.py,sha256=0MV3P-_ainPaTxmRBGWKUVCEt14KJvuvEHmRB3OQ1Fs,176
@@ -65,17 +64,18 @@ nextrec/models/retrieval/sdm.py,sha256=vNsfqmIP5XwuUJve8ettCByXfHloq6BM7SHYeIr2s
 nextrec/models/retrieval/youtube_dnn.py,sha256=ck4ja8v_BXG7X6AoEU_bUExvSHCWJBo-fNw3Y4zz9IQ,7194
 nextrec/models/sequential/hstu.py,sha256=tlZR-UMhY5dMQVqmWYdkUg54h5W3vQUctwHA5TyThMo,19664
 nextrec/models/sequential/sasrec.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-nextrec/utils/__init__.py,sha256=
+nextrec/utils/__init__.py,sha256=jD73RLigxcHFP-rXBoPi2VUTKH7kE5vNMQkr4lW8UUY,2655
 nextrec/utils/config.py,sha256=UIi4zntP2g4IJaeMQYoa6kMQlU_23Hq4N1ZugMgnB5A,20331
 nextrec/utils/console.py,sha256=RA3ZTjtUQXvueouSmXJNLkRjeUGQZesphwWjFMTbV4I,13577
 nextrec/utils/data.py,sha256=pSL96mWjWfW_RKE-qlUSs9vfiYnFZAaRirzA6r7DB6s,24994
 nextrec/utils/embedding.py,sha256=akAEc062MG2cD7VIOllHaqtwzAirQR2gq5iW7oKpGAU,1449
 nextrec/utils/feature.py,sha256=E3NOFIW8gAoRXVrDhCSonzg8k7nMUZyZzMfCq9k73_A,623
+nextrec/utils/loss.py,sha256=GBWQGpDaYkMJySpdG078XbeUNXUC34PVqFy0AqNS9N0,4578
 nextrec/utils/model.py,sha256=fHvFciUuMOVcM1oWiRva4LcArRdZ1R5Uzml-COSqqvM,4688
 nextrec/utils/torch_utils.py,sha256=1lvZ7BG-rGLIAlumQIoeq5T9dO9hx2p8sa2_DC_bTZU,11564
-nextrec/utils/types.py,sha256=
-nextrec-0.4.21.dist-info/METADATA,sha256=
-nextrec-0.4.21.dist-info/WHEEL,sha256=
-nextrec-0.4.21.dist-info/entry_points.txt,sha256=
-nextrec-0.4.21.dist-info/licenses/LICENSE,sha256=
-nextrec-0.4.21.dist-info/RECORD,,
+nextrec/utils/types.py,sha256=TDtPStmsOgYuiO4VpiEknoqWFdIe9yfrKLHKFCxrFJA,1057
+nextrec-0.4.22.dist-info/METADATA,sha256=YHgBEJA3nEAW3AJCk8xvHAbsDTdEMtRU2eskHKB09g8,21742
+nextrec-0.4.22.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+nextrec-0.4.22.dist-info/entry_points.txt,sha256=NN-dNSdfMRTv86bNXM7d3ZEPW2BQC6bRi7QP7i9cIps,45
+nextrec-0.4.22.dist-info/licenses/LICENSE,sha256=2fQfVKeafywkni7MYHyClC6RGGC3laLTXCNBx-ubtp0,1064
+nextrec-0.4.22.dist-info/RECORD,,
```
{nextrec-0.4.21.dist-info → nextrec-0.4.22.dist-info}/WHEEL
File without changes

{nextrec-0.4.21.dist-info → nextrec-0.4.22.dist-info}/entry_points.txt
File without changes

{nextrec-0.4.21.dist-info → nextrec-0.4.22.dist-info}/licenses/LICENSE
File without changes