nextrec 0.1.11__py3-none-any.whl → 0.2.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52)
  1. nextrec/__version__.py +1 -1
  2. nextrec/basic/activation.py +1 -2
  3. nextrec/basic/callback.py +1 -2
  4. nextrec/basic/features.py +39 -8
  5. nextrec/basic/layers.py +3 -4
  6. nextrec/basic/loggers.py +15 -10
  7. nextrec/basic/metrics.py +1 -2
  8. nextrec/basic/model.py +160 -125
  9. nextrec/basic/session.py +150 -0
  10. nextrec/data/__init__.py +13 -2
  11. nextrec/data/data_utils.py +74 -22
  12. nextrec/data/dataloader.py +513 -0
  13. nextrec/data/preprocessor.py +494 -134
  14. nextrec/loss/__init__.py +31 -24
  15. nextrec/loss/listwise.py +164 -0
  16. nextrec/loss/loss_utils.py +133 -106
  17. nextrec/loss/pairwise.py +105 -0
  18. nextrec/loss/pointwise.py +198 -0
  19. nextrec/models/match/dssm.py +26 -17
  20. nextrec/models/match/dssm_v2.py +20 -2
  21. nextrec/models/match/mind.py +18 -3
  22. nextrec/models/match/sdm.py +17 -2
  23. nextrec/models/match/youtube_dnn.py +23 -10
  24. nextrec/models/multi_task/esmm.py +8 -8
  25. nextrec/models/multi_task/mmoe.py +8 -8
  26. nextrec/models/multi_task/ple.py +8 -8
  27. nextrec/models/multi_task/share_bottom.py +8 -8
  28. nextrec/models/ranking/__init__.py +8 -0
  29. nextrec/models/ranking/afm.py +5 -4
  30. nextrec/models/ranking/autoint.py +6 -4
  31. nextrec/models/ranking/dcn.py +6 -4
  32. nextrec/models/ranking/deepfm.py +5 -4
  33. nextrec/models/ranking/dien.py +6 -4
  34. nextrec/models/ranking/din.py +6 -4
  35. nextrec/models/ranking/fibinet.py +6 -4
  36. nextrec/models/ranking/fm.py +6 -4
  37. nextrec/models/ranking/masknet.py +6 -4
  38. nextrec/models/ranking/pnn.py +6 -4
  39. nextrec/models/ranking/widedeep.py +6 -4
  40. nextrec/models/ranking/xdeepfm.py +6 -4
  41. nextrec/utils/__init__.py +7 -11
  42. nextrec/utils/embedding.py +2 -4
  43. nextrec/utils/initializer.py +4 -5
  44. nextrec/utils/optimizer.py +7 -8
  45. {nextrec-0.1.11.dist-info → nextrec-0.2.2.dist-info}/METADATA +3 -3
  46. nextrec-0.2.2.dist-info/RECORD +53 -0
  47. nextrec/basic/dataloader.py +0 -447
  48. nextrec/loss/match_losses.py +0 -294
  49. nextrec/utils/common.py +0 -14
  50. nextrec-0.1.11.dist-info/RECORD +0 -51
  51. {nextrec-0.1.11.dist-info → nextrec-0.2.2.dist-info}/WHEEL +0 -0
  52. {nextrec-0.1.11.dist-info → nextrec-0.2.2.dist-info}/licenses/LICENSE +0 -0
@@ -1,294 +0,0 @@
1
- """
2
- Loss functions for matching tasks
3
-
4
- Date: create on 13/11/2025
5
- Author:
6
- Yang Zhou,zyaztec@gmail.com
7
- """
8
-
9
- import torch
10
- import torch.nn as nn
11
- import torch.nn.functional as F
12
- from typing import Optional
13
-
14
-
15
class BPRLoss(nn.Module):
    """Bayesian Personalized Ranking loss.

    Encourages each positive item to score higher than its sampled
    negatives: ``loss = -log(sigmoid(pos_score - neg_score))``.

    Args:
        reduction: ``'mean'``, ``'sum'``, or any other value for no
            reduction (element-wise loss is returned).
    """

    def __init__(self, reduction: str = 'mean'):
        super().__init__()
        self.reduction = reduction

    def forward(self, pos_score: torch.Tensor, neg_score: torch.Tensor) -> torch.Tensor:
        """Compute the BPR loss.

        Args:
            pos_score: ``[batch_size]`` scores of positive items.
            neg_score: ``[batch_size]`` or ``[batch_size, num_neg]``
                scores of negative items.

        Returns:
            Scalar tensor for ``'mean'``/``'sum'``; element-wise tensor
            otherwise.
        """
        # Broadcast each positive over its sampled negatives.
        if neg_score.dim() == 2:
            pos_score = pos_score.unsqueeze(1)  # [batch_size, 1]
        # -logsigmoid(diff) is the numerically stable form of
        # -log(sigmoid(diff) + eps): it neither saturates for very
        # negative diffs nor needs an epsilon. (The original duplicated
        # this computation in both dim branches; the math is identical.)
        loss = -F.logsigmoid(pos_score - neg_score)
        if self.reduction == 'mean':
            return loss.mean()
        if self.reduction == 'sum':
            return loss.sum()
        return loss
40
-
41
-
42
class HingeLoss(nn.Module):
    """Margin-based hinge loss for pairwise ranking.

    Penalizes pairs whose positive score does not exceed the negative
    score by at least ``margin``:
    ``loss = max(0, margin - (pos_score - neg_score))``.

    Args:
        margin: Required score gap between positives and negatives.
        reduction: ``'mean'``, ``'sum'``, or any other value for no
            reduction.
    """

    def __init__(self, margin: float = 1.0, reduction: str = 'mean'):
        super().__init__()
        self.margin = margin
        self.reduction = reduction

    def forward(self, pos_score: torch.Tensor, neg_score: torch.Tensor) -> torch.Tensor:
        """pos_score: [batch]; neg_score: [batch] or [batch, num_neg]."""
        # Align shapes when several negatives are sampled per positive.
        if neg_score.dim() == 2:
            pos_score = pos_score.unsqueeze(1)  # [batch_size, 1]

        # relu(x) == clamp(x, min=0): hinge on the margin violation.
        violation = F.relu(self.margin - (pos_score - neg_score))

        if self.reduction == 'sum':
            return violation.sum()
        if self.reduction == 'mean':
            return violation.mean()
        return violation
61
-
62
-
63
class TripletLoss(nn.Module):
    """Triplet margin loss over (anchor, positive, negative) embeddings.

    ``loss = max(0, d(anchor, positive) - d(anchor, negative) + margin)``

    Args:
        margin: Required gap between negative and positive distances.
        reduction: ``'mean'``, ``'sum'``, or any other value for no
            reduction.
        distance: ``'euclidean'`` (NOTE: squared L2, no sqrt — kept as
            in the original implementation) or ``'cosine'``
            (``1 - cosine_similarity``).
    """

    def __init__(self, margin: float = 1.0, reduction: str = 'mean', distance: str = 'euclidean'):
        super().__init__()
        self.margin = margin
        self.reduction = reduction
        self.distance = distance

    def _distance(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
        """Distance along the last dim; supports broadcast over a num_neg axis.

        The original duplicated this logic across the euclidean/cosine
        branches and across the 2-D/3-D negative cases; broadcasting
        makes one implementation cover all of them.
        """
        if self.distance == 'euclidean':
            # Squared Euclidean distance (intentionally no sqrt).
            return torch.sum((a - b) ** 2, dim=-1)
        if self.distance == 'cosine':
            return 1 - F.cosine_similarity(a, b, dim=-1)
        raise ValueError(f"Unsupported distance: {self.distance}")

    def forward(self, anchor: torch.Tensor, positive: torch.Tensor, negative: torch.Tensor) -> torch.Tensor:
        """anchor/positive: [batch, dim]; negative: [batch, dim] or [batch, num_neg, dim]."""
        pos_dist = self._distance(anchor, positive)  # [batch]

        if negative.dim() == 3:
            # [batch, 1, dim] vs [batch, num_neg, dim] -> [batch, num_neg]
            neg_dist = self._distance(anchor.unsqueeze(1), negative)
        else:
            neg_dist = self._distance(anchor, negative)  # [batch]

        if neg_dist.dim() == 2:
            pos_dist = pos_dist.unsqueeze(1)  # [batch, 1] broadcasts over negatives

        loss = torch.clamp(pos_dist - neg_dist + self.margin, min=0)

        if self.reduction == 'mean':
            return loss.mean()
        if self.reduction == 'sum':
            return loss.sum()
        return loss
105
-
106
-
107
class SampledSoftmaxLoss(nn.Module):
    """Softmax cross-entropy over one positive and sampled negatives.

    The positive logit is concatenated at column 0 and the target class
    is always 0, so the loss is the negative log-softmax probability of
    the positive item among all candidates.

    Args:
        reduction: Passed straight through to ``F.cross_entropy``.
    """

    def __init__(self, reduction: str = 'mean'):
        super().__init__()
        self.reduction = reduction

    def forward(self, pos_logits: torch.Tensor, neg_logits: torch.Tensor) -> torch.Tensor:
        """pos_logits: [batch]; neg_logits: [batch, num_neg]."""
        # Candidate matrix with the positive always in column 0.
        candidates = torch.cat([pos_logits.unsqueeze(1), neg_logits], dim=1)
        # Target class 0 == the positive column (same dtype/device as logits' index space).
        target = torch.zeros(candidates.size(0), dtype=torch.long, device=candidates.device)
        return F.cross_entropy(candidates, target, reduction=self.reduction)
119
-
120
-
121
class CosineContrastiveLoss(nn.Module):
    """Contrastive loss on cosine similarity of user/item embeddings.

    Positive pairs (label 1) are pulled toward similarity 1; negative
    pairs (label 0) are pushed below ``margin``.

    Args:
        margin: Similarity threshold above which negatives are penalized.
        reduction: ``'mean'``, ``'sum'``, or any other value for no
            reduction.
    """

    def __init__(self, margin: float = 0.5, reduction: str = 'mean'):
        super().__init__()
        self.margin = margin
        self.reduction = reduction

    def forward(self, user_emb: torch.Tensor, item_emb: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:
        """user_emb/item_emb: [batch, dim]; labels: [batch] in {0, 1}."""
        sim = F.cosine_similarity(user_emb, item_emb, dim=-1)

        # Attraction term, active for positive pairs only.
        attract = labels * (1 - sim)
        # Repulsion term, active for negatives whose similarity exceeds the margin.
        repel = (1 - labels) * F.relu(sim - self.margin)

        per_pair = attract + repel

        if self.reduction == 'sum':
            return per_pair.sum()
        if self.reduction == 'mean':
            return per_pair.mean()
        return per_pair
141
-
142
-
143
class InfoNCELoss(nn.Module):
    """InfoNCE contrastive loss with dot-product similarity.

    The positive key competes with sampled negative keys in a
    temperature-scaled softmax; the target class is the positive
    (placed at logits column 0).

    Args:
        temperature: Softmax temperature (smaller == sharper).
        reduction: Passed straight through to ``F.cross_entropy``.
    """

    def __init__(self, temperature: float = 0.07, reduction: str = 'mean'):
        super().__init__()
        self.temperature = temperature
        self.reduction = reduction

    def forward(self, query: torch.Tensor, pos_key: torch.Tensor, neg_keys: torch.Tensor) -> torch.Tensor:
        """query/pos_key: [batch, dim]; neg_keys: [batch, num_neg, dim]."""
        # Temperature-scaled dot-product similarities.
        pos_sim = (query * pos_key).sum(dim=-1, keepdim=True) / self.temperature        # [batch, 1]
        neg_sim = (query.unsqueeze(1) * neg_keys).sum(dim=-1) / self.temperature        # [batch, num_neg]

        logits = torch.cat((pos_sim, neg_sim), dim=1)  # [batch, 1 + num_neg]
        target = torch.zeros(logits.size(0), dtype=torch.long, device=logits.device)

        return F.cross_entropy(logits, target, reduction=self.reduction)
160
-
161
-
162
class ListNetLoss(nn.Module):
    """
    ListNet loss using top-1 probability distribution.
    Reference: Cao et al. Learning to Rank: From Pairwise Approach to Listwise Approach (ICML 2007)
    """

    def __init__(self, temperature: float = 1.0, reduction: str = 'mean'):
        super().__init__()
        self.temperature = temperature
        self.reduction = reduction

    def forward(self, scores: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:
        """scores/labels: [batch, list_size]; returns reduced cross-entropy."""
        # Top-1 probability distributions implied by predictions and labels.
        pred_dist = F.softmax(scores / self.temperature, dim=1)
        true_dist = F.softmax(labels / self.temperature, dim=1)

        # Per-list cross entropy H(true, pred); epsilon guards log(0).
        per_list = -(true_dist * torch.log(pred_dist + 1e-10)).sum(dim=1)

        if self.reduction == 'sum':
            return per_list.sum()
        if self.reduction == 'mean':
            return per_list.mean()
        return per_list
186
-
187
-
188
class ListMLELoss(nn.Module):
    """
    ListMLE (Maximum Likelihood Estimation) loss.
    Reference: Xia et al. Listwise approach to learning to rank: theory and algorithm (ICML 2008)
    """

    def __init__(self, reduction: str = 'mean'):
        super().__init__()
        self.reduction = reduction

    def forward(self, scores: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:
        """Negative log-likelihood of the label-induced permutation.

        Args:
            scores: [batch_size, list_size] predicted scores.
            labels: [batch_size, list_size] ground-truth relevance.

        Returns:
            Scalar loss. NOTE: any reduction other than 'sum' (including
            'none') returns total / batch_size — this matches the
            original implementation, which never produced per-sample
            losses.
        """
        batch_size = scores.size(0)

        # Ground-truth permutation: order items by label, descending.
        _, order = torch.sort(labels, descending=True, dim=1)
        ordered_scores = torch.gather(scores, 1, order)

        # log P(permutation) = sum_i [ logsumexp(s_i..s_L) - s_i ].
        # The suffix logsumexp is computed with one flipped cumulative
        # logsumexp, replacing the original O(list_size^2) Python loop
        # with a single vectorized (and numerically stable) pass.
        suffix_lse = torch.logcumsumexp(ordered_scores.flip(1), dim=1).flip(1)
        total = (suffix_lse - ordered_scores).sum()

        if self.reduction == 'sum':
            return total
        return total / batch_size
221
-
222
-
223
class ApproxNDCGLoss(nn.Module):
    """
    Approximate NDCG loss for learning to rank.
    Reference: Qin et al. A General Approximation Framework for Direct Optimization of
               Information Retrieval Measures (Information Retrieval 2010)
    """

    def __init__(self, temperature: float = 1.0, reduction: str = 'mean'):
        super().__init__()
        self.temperature = temperature
        self.reduction = reduction

    def _dcg(self, relevance: torch.Tensor, k: Optional[int] = None) -> torch.Tensor:
        """DCG of a [batch, list_size] relevance matrix (optionally truncated at k).

        DCG = sum_i rel_i / log2(i + 2) over positions i = 0..list_size-1.
        """
        if k is not None:
            relevance = relevance[:, :k]

        positions = torch.arange(1, relevance.size(1) + 1, device=relevance.device, dtype=torch.float32)
        discounts = torch.log2(positions + 1.0)
        return torch.sum(relevance / discounts, dim=1)

    def forward(self, scores: torch.Tensor, labels: torch.Tensor, k: Optional[int] = None) -> torch.Tensor:
        """
        Args:
            scores: Predicted scores [batch_size, list_size]
            labels: Ground truth relevance labels [batch_size, list_size]
            k: Top-k items for NDCG@k (if None, use all items)

        Returns:
            Approximate NDCG loss (1 - NDCG)
        """
        # NOTE(review): the original also built a [B, L, L] sigmoid pairwise
        # matrix here, but never used it — that dead O(L^2) computation
        # (and an unused batch_size local) has been removed; outputs are
        # unchanged.

        # Differentiable stand-in for hard ranking: softmax weight per item.
        ranking_weights = F.softmax(scores / self.temperature, dim=1)

        # Ideal DCG from labels sorted in descending relevance.
        ideal_labels, _ = torch.sort(labels, descending=True, dim=1)
        ideal_dcg = self._dcg(ideal_labels, k)

        # Position discounts 1 / log2(pos + 1), pos = 1..list_size.
        positions = torch.arange(1, scores.size(1) + 1, device=scores.device, dtype=torch.float32)
        discounts = 1.0 / torch.log2(positions + 1.0)

        # Approximate DCG: each item's relevance weighted by its soft rank.
        approx_dcg = torch.sum(labels * ranking_weights * discounts, dim=1)

        # Normalize; epsilon guards all-zero label lists.
        ndcg = approx_dcg / (ideal_dcg + 1e-10)

        # Minimize 1 - NDCG to maximize NDCG.
        loss = 1.0 - ndcg

        if self.reduction == 'mean':
            return loss.mean()
        if self.reduction == 'sum':
            return loss.sum()
        return loss
nextrec/utils/common.py DELETED
@@ -1,14 +0,0 @@
1
- """
2
- Common utilities for NextRec
3
-
4
- Date: create on 13/11/2025
5
- Author:
6
- Yang Zhou, zyaztec@gmail.com
7
- """
8
-
9
-
10
def get_task_type(model) -> str:
    """Return the task type declared on ``model``.

    Args:
        model: Any object exposing a ``task_type`` attribute.

    Returns:
        The model's ``task_type`` string.

    Raises:
        AttributeError: If ``model`` has no ``task_type`` attribute.
    """
    return getattr(model, "task_type")
@@ -1,51 +0,0 @@
1
- nextrec/__init__.py,sha256=CvocnY2uBp0cjNkhrT6ogw0q2bN9s1GNp754FLO-7lo,1117
2
- nextrec/__version__.py,sha256=nllDrH0jyChMuuYrK0CC55iTBKUNTUjejtcwxyUF2EQ,23
3
- nextrec/basic/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
4
- nextrec/basic/activation.py,sha256=XJDTFzmacpLq8DMNbFVhZ3WhlOmKDE88vp0udnVpXtE,2808
5
- nextrec/basic/callback.py,sha256=c0QeolbPJzCYhJnPf9rrZwFU13zmLxg59nvQGbpetNo,1039
6
- nextrec/basic/dataloader.py,sha256=roG1a7VRPpdy9XBv0rJg7wz00ggok9WNHU_EoDIxY2c,18898
7
- nextrec/basic/features.py,sha256=wJbiDqE_qWA5gArUm-NYHaLgk7AMxpA7loaovf84dSU,2526
8
- nextrec/basic/layers.py,sha256=dvMir_0PJQfZv0uCUeqyiJpb-QOz0f2CUu2Cuuxh7iA,38300
9
- nextrec/basic/loggers.py,sha256=0fupxPiHrKcBEJTBm0Sjcim0rU-n0gYKuy6IiCYX1Bw,3480
10
- nextrec/basic/metrics.py,sha256=p79-IRRprLcXjjicrG41vM0zwRGtUY5tTPoybpvz-io,20402
11
- nextrec/basic/model.py,sha256=HeExyUkhihiARJvCXfLztkMDfUELR9WGJKtn26QAfPc,65879
12
- nextrec/data/__init__.py,sha256=vvBNAdHcVO54aaaT-SyYHWsPHhoH8GvrlZ2hMRjqyF8,524
13
- nextrec/data/data_utils.py,sha256=rpcj5CIWw8RlLn1NYva_gEOlpYG1cy65rB1BSv23XAM,4113
14
- nextrec/data/preprocessor.py,sha256=0gYc_nH6ek3QxgncSZ8B8KyYmIYdCFMx9rSEdo4-aFw,26442
15
- nextrec/loss/__init__.py,sha256=kBanUB5rxQKwXTd6f-2hOI_CF7cp_MClAwAeVXIkpig,647
16
- nextrec/loss/loss_utils.py,sha256=3zeeLBG4lNIXCO94jx-BYlSHl14t-U7L06dQuzVSPJ8,4752
17
- nextrec/loss/match_losses.py,sha256=BaH4GKVSFU_PNhHPP_JuAM5zwjOIPxcbuNLYpK0-EWA,11652
18
- nextrec/models/generative/hstu.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
19
- nextrec/models/generative/tiger.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
20
- nextrec/models/match/__init__.py,sha256=ASZB5abqKPhDbk8NErNNNa0DHuWpsVxvUtyEn5XMx6Y,215
21
- nextrec/models/match/dssm.py,sha256=rl-8-4pG5_DgxM0VYZuPzLP1lhvDF1BbQQoqxBMMqYw,7702
22
- nextrec/models/match/dssm_v2.py,sha256=eyWrCo8g8y-e_fq5168iTA_xuHdYWBT9v96GaPor6-o,6407
23
- nextrec/models/match/mind.py,sha256=5g7b-VOShPQ9D4FG-1z8exKYFLJS_z_Lt2bvU_qSC98,8735
24
- nextrec/models/match/sdm.py,sha256=rJK49438-49JvzNQX2Vi6Zn1sn9twjyOb2YG2lVaGlc,10238
25
- nextrec/models/match/youtube_dnn.py,sha256=Su5kwrHGRXrv_4psXZgr9hXpUF5bdosXqdmtHb5J2Vs,6834
26
- nextrec/models/multi_task/esmm.py,sha256=0dn8pJ7BAQh5oqpNZISSiTb6sgXddsG99eOdpQVMSTU,4817
27
- nextrec/models/multi_task/mmoe.py,sha256=vly9c8e-Xc_m9AjWUmTGtidf67bjiHPwwbAFbXc0XpM,6099
28
- nextrec/models/multi_task/ple.py,sha256=mM8shre3BX-sg_peokMh35_-wQAMG5UI2eUfhyRzTgs,11269
29
- nextrec/models/multi_task/share_bottom.py,sha256=MzShugQya1rSovhbvmTDD4Uf1MRCGfgIKqKXVsz0RTo,4451
30
- nextrec/models/ranking/__init__.py,sha256=GMn3R5TkE9W17dzHuQoASJsQCoM_EIHuUhnMS2jMdZw,291
31
- nextrec/models/ranking/afm.py,sha256=BZvGyJZ9aAoL3P8ebsMoQ9HqX2UyKkFdktfz3_VMalA,4483
32
- nextrec/models/ranking/autoint.py,sha256=D9jeEP0w-IssbporOIPzTzi6PveiYVcgN7D6AXYxyLc,5580
33
- nextrec/models/ranking/dcn.py,sha256=HyXXzooS1zqOWU6MAPi6tBdmDs4o64HP9vBV5fYdKO4,4134
34
- nextrec/models/ranking/deepfm.py,sha256=Yl95d4r0dytcZSn4A8ukgxOQ8eaF0t5MqDd9KPfkdPI,3453
35
- nextrec/models/ranking/dien.py,sha256=2maimf_c6L-I0JpJNbmpIjbMV8uCndrdFiqvjwxMaj8,8401
36
- nextrec/models/ranking/din.py,sha256=Qs4IxfvCmT2lGtZ6BvgdzMoT0lCy88yaXE1FecaMo2c,7122
37
- nextrec/models/ranking/fibinet.py,sha256=h6a738bo3VikKHKZhOzk_p9YGNs7hWcpEOkJvOMDR88,4779
38
- nextrec/models/ranking/fm.py,sha256=WsbQV8RUc2O7b66GRZicNWaWOtin_QLO8e_Skjk5aIY,2887
39
- nextrec/models/ranking/masknet.py,sha256=ADki3oMR7PwWgcf5GhIUQJxto-gFNmIlU-GRsdi04Jk,4565
40
- nextrec/models/ranking/pnn.py,sha256=ZhsUh-O_kLJLfK28dp81DMGYnzMkO-L86CgESlT2TB0,4883
41
- nextrec/models/ranking/widedeep.py,sha256=7EylqHFaxrclRr-PVhKRxBLOOf8E5-AJbWfJqZpdzy0,3642
42
- nextrec/models/ranking/xdeepfm.py,sha256=p2PrQHxmvABdQl1wLnP5VyRy5Chdp7Xcw1FJw7m1LFY,4200
43
- nextrec/utils/__init__.py,sha256=-wyEzZrYQ9QL5zPbWdBIWzg-HbT-2wmmbH2Kceuzlzk,510
44
- nextrec/utils/common.py,sha256=-LrRY1MFAhgeyZkKyqdVQGxev6eH3gigNtlRKw5f8Iw,214
45
- nextrec/utils/embedding.py,sha256=Xl5bXAdxdGc0FV3FthNqJe9MP0M_rZI1uaOlPi3vLj8,478
46
- nextrec/utils/initializer.py,sha256=ka5sgXWqAb9x5hQS6ypgonR93OUajBVUAwO7q-JPjIE,1660
47
- nextrec/utils/optimizer.py,sha256=g9IETUdflM89YKSzInP_iS_hTnDy_cjpm6Wcq9V9_vE,2468
48
- nextrec-0.1.11.dist-info/METADATA,sha256=DNNd7XuW1MZH4fa4tLqQsxKfcveloyR6Oo1YMBIr34A,11425
49
- nextrec-0.1.11.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
50
- nextrec-0.1.11.dist-info/licenses/LICENSE,sha256=2fQfVKeafywkni7MYHyClC6RGGC3laLTXCNBx-ubtp0,1064
51
- nextrec-0.1.11.dist-info/RECORD,,