replay-rec 0.20.0-py3-none-any.whl → 0.20.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- replay/__init__.py +1 -1
- replay/data/dataset.py +10 -9
- replay/data/dataset_utils/dataset_label_encoder.py +5 -4
- replay/data/nn/schema.py +9 -18
- replay/data/nn/sequence_tokenizer.py +26 -18
- replay/data/nn/sequential_dataset.py +22 -18
- replay/data/nn/torch_sequential_dataset.py +17 -16
- replay/data/nn/utils.py +2 -1
- replay/data/schema.py +3 -12
- replay/metrics/base_metric.py +11 -10
- replay/metrics/categorical_diversity.py +8 -8
- replay/metrics/coverage.py +4 -4
- replay/metrics/experiment.py +3 -3
- replay/metrics/hitrate.py +1 -3
- replay/metrics/map.py +1 -3
- replay/metrics/mrr.py +1 -3
- replay/metrics/ndcg.py +1 -2
- replay/metrics/novelty.py +3 -3
- replay/metrics/offline_metrics.py +16 -16
- replay/metrics/precision.py +1 -3
- replay/metrics/recall.py +1 -3
- replay/metrics/rocauc.py +1 -3
- replay/metrics/surprisal.py +4 -4
- replay/metrics/torch_metrics_builder.py +13 -12
- replay/metrics/unexpectedness.py +2 -2
- replay/models/als.py +2 -2
- replay/models/association_rules.py +4 -3
- replay/models/base_neighbour_rec.py +3 -2
- replay/models/base_rec.py +11 -10
- replay/models/cat_pop_rec.py +2 -1
- replay/models/extensions/ann/ann_mixin.py +2 -1
- replay/models/extensions/ann/index_builders/executor_hnswlib_index_builder.py +2 -1
- replay/models/extensions/ann/index_builders/executor_nmslib_index_builder.py +2 -1
- replay/models/lin_ucb.py +57 -11
- replay/models/nn/optimizer_utils/optimizer_factory.py +2 -2
- replay/models/nn/sequential/bert4rec/dataset.py +5 -18
- replay/models/nn/sequential/bert4rec/lightning.py +3 -3
- replay/models/nn/sequential/bert4rec/model.py +2 -2
- replay/models/nn/sequential/callbacks/prediction_callbacks.py +12 -12
- replay/models/nn/sequential/callbacks/validation_callback.py +9 -9
- replay/models/nn/sequential/compiled/base_compiled_model.py +5 -5
- replay/models/nn/sequential/postprocessors/_base.py +2 -3
- replay/models/nn/sequential/postprocessors/postprocessors.py +11 -11
- replay/models/nn/sequential/sasrec/dataset.py +3 -16
- replay/models/nn/sequential/sasrec/lightning.py +3 -3
- replay/models/nn/sequential/sasrec/model.py +8 -8
- replay/models/slim.py +2 -2
- replay/models/ucb.py +2 -2
- replay/models/word2vec.py +3 -3
- replay/preprocessing/discretizer.py +8 -7
- replay/preprocessing/filters.py +4 -4
- replay/preprocessing/history_based_fp.py +6 -6
- replay/preprocessing/label_encoder.py +8 -7
- replay/scenarios/fallback.py +4 -3
- replay/splitters/base_splitter.py +3 -3
- replay/splitters/cold_user_random_splitter.py +4 -4
- replay/splitters/k_folds.py +4 -4
- replay/splitters/last_n_splitter.py +10 -10
- replay/splitters/new_users_splitter.py +4 -4
- replay/splitters/random_splitter.py +4 -4
- replay/splitters/ratio_splitter.py +10 -10
- replay/splitters/time_splitter.py +6 -6
- replay/splitters/two_stage_splitter.py +4 -4
- replay/utils/__init__.py +1 -1
- replay/utils/common.py +1 -1
- replay/utils/session_handler.py +2 -2
- replay/utils/spark_utils.py +6 -5
- replay/utils/types.py +3 -1
- {replay_rec-0.20.0.dist-info → replay_rec-0.20.1.dist-info}/METADATA +7 -1
- {replay_rec-0.20.0.dist-info → replay_rec-0.20.1.dist-info}/RECORD +73 -74
- replay/utils/warnings.py +0 -26
- {replay_rec-0.20.0.dist-info → replay_rec-0.20.1.dist-info}/WHEEL +0 -0
- {replay_rec-0.20.0.dist-info → replay_rec-0.20.1.dist-info}/licenses/LICENSE +0 -0
- {replay_rec-0.20.0.dist-info → replay_rec-0.20.1.dist-info}/licenses/NOTICE +0 -0
replay/splitters/random_splitter.py
CHANGED

@@ -1,4 +1,4 @@
-from typing import Optional
+from typing import Optional
 
 from replay.utils import DataFrameLike, PandasDataFrame, PolarsDataFrame, SparkDataFrame
 

@@ -48,20 +48,20 @@ class RandomSplitter(Splitter):
 
     def _random_split_spark(
         self, interactions: SparkDataFrame, threshold: float
-    ) ->
+    ) -> tuple[SparkDataFrame, SparkDataFrame]:
        train, test = interactions.randomSplit([1 - threshold, threshold], self.seed)
        return train, test
 
     def _random_split_pandas(
         self, interactions: PandasDataFrame, threshold: float
-    ) ->
+    ) -> tuple[PandasDataFrame, PandasDataFrame]:
        train = interactions.sample(frac=(1 - threshold), random_state=self.seed)
        test = interactions.drop(train.index)
        return train, test
 
     def _random_split_polars(
         self, interactions: PolarsDataFrame, threshold: float
-    ) ->
+    ) -> tuple[PolarsDataFrame, PolarsDataFrame]:
        train_size = int(len(interactions) * (1 - threshold)) + 1
        shuffled_interactions = interactions.sample(fraction=1, shuffle=True, seed=self.seed)
        train = shuffled_interactions[:train_size]
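The recurring change in this release, visible above and repeated across the splitter, metrics, and utils hunks below, is that return annotations now use the builtin generics `tuple[...]` and `list[...]` (PEP 585), so fewer names are needed from `typing`. A minimal self-contained sketch of that style follows; it is not replay-rec code, and `random_split` merely mirrors the pandas branch shown above:

```python
# Sketch (not replay-rec code) of the annotation style used in the new lines
# above: builtin generics such as tuple[pd.DataFrame, pd.DataFrame] need no
# extra typing imports on Python 3.9+.
import pandas as pd


def random_split(interactions: pd.DataFrame, threshold: float, seed: int = 42) -> tuple[pd.DataFrame, pd.DataFrame]:
    """Mirror of the pandas branch shown above: sample a train part, drop it for test."""
    train = interactions.sample(frac=(1 - threshold), random_state=seed)
    test = interactions.drop(train.index)
    return train, test
```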
replay/splitters/ratio_splitter.py
CHANGED

@@ -1,4 +1,4 @@
-from typing import
+from typing import Optional
 
 import polars as pl
 

@@ -200,7 +200,7 @@ class RatioSplitter(Splitter):
 
     def _partial_split_fractions(
         self, interactions: DataFrameLike, ratio: float
-    ) ->
+    ) -> tuple[DataFrameLike, DataFrameLike]:
        res = self._add_time_partition(interactions)
        train_size = round(1 - ratio, self._precision)
 

@@ -212,7 +212,7 @@ class RatioSplitter(Splitter):
 
     def _partial_split_fractions_pandas(
         self, interactions: PandasDataFrame, train_size: float
-    ) ->
+    ) -> tuple[PandasDataFrame, PandasDataFrame]:
        interactions["count"] = interactions.groupby(self.divide_column, sort=False)[self.divide_column].transform(len)
        interactions["frac"] = (interactions["row_num"] / interactions["count"]).round(self._precision)
        if self.min_interactions_per_group is not None:

@@ -229,7 +229,7 @@ class RatioSplitter(Splitter):
 
     def _partial_split_fractions_spark(
         self, interactions: SparkDataFrame, train_size: float
-    ) ->
+    ) -> tuple[SparkDataFrame, SparkDataFrame]:
        interactions = interactions.withColumn(
            "count", sf.count(self.timestamp_column).over(Window.partitionBy(self.divide_column))
        )

@@ -257,7 +257,7 @@ class RatioSplitter(Splitter):
 
     def _partial_split_fractions_polars(
         self, interactions: PolarsDataFrame, train_size: float
-    ) ->
+    ) -> tuple[PolarsDataFrame, PolarsDataFrame]:
        interactions = interactions.with_columns(
            pl.count(self.timestamp_column).over(pl.col(self.divide_column)).alias("count")
        )

@@ -282,7 +282,7 @@ class RatioSplitter(Splitter):
 
        return train, test
 
-    def _partial_split(self, interactions: DataFrameLike, ratio: float) ->
+    def _partial_split(self, interactions: DataFrameLike, ratio: float) -> tuple[DataFrameLike, DataFrameLike]:
        res = self._add_time_partition(interactions)
        if isinstance(res, SparkDataFrame):
            return self._partial_split_spark(res, ratio)

@@ -293,7 +293,7 @@ class RatioSplitter(Splitter):
 
     def _partial_split_pandas(
         self, interactions: PandasDataFrame, ratio: float
-    ) ->
+    ) -> tuple[PandasDataFrame, PandasDataFrame]:
        interactions["count"] = interactions.groupby(self.divide_column, sort=False)[self.divide_column].transform(len)
        interactions["train_size"] = interactions["count"] - (interactions["count"] * ratio).astype(int)
        if self.min_interactions_per_group is not None:

@@ -319,7 +319,7 @@ class RatioSplitter(Splitter):
 
        return train, test
 
-    def _partial_split_spark(self, interactions: SparkDataFrame, ratio: float) ->
+    def _partial_split_spark(self, interactions: SparkDataFrame, ratio: float) -> tuple[SparkDataFrame, SparkDataFrame]:
        interactions = interactions.withColumn(
            "count", sf.count(self.timestamp_column).over(Window.partitionBy(self.divide_column))
        )

@@ -352,7 +352,7 @@ class RatioSplitter(Splitter):
 
     def _partial_split_polars(
         self, interactions: PolarsDataFrame, ratio: float
-    ) ->
+    ) -> tuple[PolarsDataFrame, PolarsDataFrame]:
        interactions = interactions.with_columns(
            pl.count(self.timestamp_column).over(self.divide_column).alias("count")
        )

@@ -385,7 +385,7 @@ class RatioSplitter(Splitter):
 
        return train, test
 
-    def _core_split(self, interactions: DataFrameLike) ->
+    def _core_split(self, interactions: DataFrameLike) -> list[DataFrameLike]:
        if self.split_by_fractions:
            return self._partial_split_fractions(interactions, self.test_size)
        else:
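The Spark context lines above attach a per-group interaction count with a window function before splitting. A standalone sketch of that counting pattern, with an illustrative local session, toy data, and column names (assumptions, not library code):

```python
# Per-group count via a window, as used by the Spark branches above.
import pyspark.sql.functions as sf
from pyspark.sql import SparkSession, Window

spark = SparkSession.builder.master("local[1]").getOrCreate()
interactions = spark.createDataFrame(
    [(1, 10), (1, 11), (2, 12)], ["user_id", "timestamp"]
)
interactions = interactions.withColumn(
    "count", sf.count("timestamp").over(Window.partitionBy("user_id"))
)
interactions.show()  # each row carries the size of its user_id group
```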
replay/splitters/time_splitter.py
CHANGED

@@ -1,5 +1,5 @@
 from datetime import datetime
-from typing import
+from typing import Optional, Union
 
 import polars as pl
 

@@ -150,7 +150,7 @@ class TimeSplitter(Splitter):
 
     def _partial_split(
         self, interactions: DataFrameLike, threshold: Union[datetime, str, int]
-    ) ->
+    ) -> tuple[DataFrameLike, DataFrameLike]:
        if isinstance(threshold, str):
            threshold = datetime.strptime(threshold, self.time_column_format)
 

@@ -166,7 +166,7 @@ class TimeSplitter(Splitter):
 
     def _partial_split_pandas(
         self, interactions: PandasDataFrame, threshold: Union[datetime, str, int]
-    ) ->
+    ) -> tuple[PandasDataFrame, PandasDataFrame]:
        res = interactions.copy(deep=True)
        if isinstance(threshold, float):
            res.sort_values(self.timestamp_column, inplace=True)

@@ -186,7 +186,7 @@ class TimeSplitter(Splitter):
 
     def _partial_split_spark(
         self, interactions: SparkDataFrame, threshold: Union[datetime, str, int]
-    ) ->
+    ) -> tuple[SparkDataFrame, SparkDataFrame]:
        if isinstance(threshold, float):
            dates = interactions.select(self.timestamp_column).withColumn(
                "_row_number_by_ts", sf.row_number().over(Window.orderBy(self.timestamp_column))

@@ -208,7 +208,7 @@ class TimeSplitter(Splitter):
 
     def _partial_split_polars(
         self, interactions: PolarsDataFrame, threshold: Union[datetime, str, int]
-    ) ->
+    ) -> tuple[PolarsDataFrame, PolarsDataFrame]:
        if isinstance(threshold, float):
            test_start = int(len(interactions) * (1 - threshold)) + 1
 

@@ -225,5 +225,5 @@ class TimeSplitter(Splitter):
 
        return train, test
 
-    def _core_split(self, interactions: DataFrameLike) ->
+    def _core_split(self, interactions: DataFrameLike) -> list[DataFrameLike]:
        return self._partial_split(interactions, self.time_threshold)
replay/splitters/two_stage_splitter.py
CHANGED

@@ -2,7 +2,7 @@
 This splitter split data by two columns.
 """
 
-from typing import Optional
+from typing import Optional
 
 import numpy as np
 import polars as pl

@@ -165,7 +165,7 @@ class TwoStageSplitter(Splitter):
 
        return test_users
 
-    def _split_proportion_spark(self, interactions: SparkDataFrame) ->
+    def _split_proportion_spark(self, interactions: SparkDataFrame) -> tuple[SparkDataFrame, SparkDataFrame]:
        counts = interactions.groupBy(self.first_divide_column).count()
        test_users = self._get_test_values(interactions).withColumn("is_test", sf.lit(True))
        if self.shuffle:

@@ -197,7 +197,7 @@ class TwoStageSplitter(Splitter):
 
        return train, test
 
-    def _split_proportion_pandas(self, interactions: PandasDataFrame) ->
+    def _split_proportion_pandas(self, interactions: PandasDataFrame) -> tuple[PandasDataFrame, PandasDataFrame]:
        counts = (
            interactions.groupby(self.first_divide_column).agg(count=(self.first_divide_column, "count")).reset_index()
        )

@@ -224,7 +224,7 @@ class TwoStageSplitter(Splitter):
 
        return train, test
 
-    def _split_proportion_polars(self, interactions: PolarsDataFrame) ->
+    def _split_proportion_polars(self, interactions: PolarsDataFrame) -> tuple[PolarsDataFrame, PolarsDataFrame]:
        counts = interactions.group_by(self.first_divide_column).count()
        test_users = self._get_test_values(interactions).with_columns(pl.lit(True).alias("is_test"))
        if self.shuffle:
replay/utils/__init__.py
CHANGED
replay/utils/common.py
CHANGED
@@ -73,7 +73,7 @@ def load_from_replay(path: Union[str, Path], **kwargs) -> SavableObject:
     :param path: Path to save the object.
     """
     path = Path(path).with_suffix(".replay").resolve()
-    with open(path / "init_args.json"
+    with open(path / "init_args.json") as file:
         class_name = json.loads(file.read())["_class_name"]
     obj_type = globals()[class_name]
     obj = obj_type.load(path, **kwargs)
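The removed `open(...)` call is truncated in this view; the new call simply relies on `open`'s default text mode. A hedged sketch of the dispatch-by-class-name pattern the surrounding lines show, reading `_class_name` from the saved `init_args.json`; the standalone helper below is illustrative, not the library's implementation:

```python
# Read the saved class name from a .replay directory, mirroring the lines above.
import json
from pathlib import Path


def read_class_name(path: str) -> str:
    resolved = Path(path).with_suffix(".replay").resolve()
    with open(resolved / "init_args.json") as file:  # text mode "r" is the default
        return json.loads(file.read())["_class_name"]
```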
replay/utils/session_handler.py
CHANGED
@@ -6,7 +6,7 @@ import logging
 import os
 import sys
 from math import floor
-from typing import Any,
+from typing import Any, Optional
 
 import psutil
 

@@ -113,7 +113,7 @@ class Borg:
     This class allows to share objects between instances.
     """
 
-    _shared_state:
+    _shared_state: dict[str, Any] = {}
 
     def __init__(self):
         self.__dict__ = self._shared_state
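`Borg` implements the shared-state (monostate) pattern: every instance rebinds its `__dict__` to one class-level dict, which the new line annotates as `dict[str, Any]`. A minimal self-contained illustration of the behaviour shown above:

```python
# Borg pattern: all instances share one state dict, so an attribute set through
# one instance is visible through every other instance.
from typing import Any


class Borg:
    _shared_state: dict[str, Any] = {}

    def __init__(self) -> None:
        self.__dict__ = self._shared_state


first, second = Borg(), Borg()
first.session = "spark"
print(second.session)  # -> "spark": the state dict is shared
```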
replay/utils/spark_utils.py
CHANGED
@@ -3,7 +3,8 @@ import logging
 import os
 import pickle
 import warnings
-from
+from collections.abc import Iterable
+from typing import Any, Optional, Union
 
 import numpy as np
 import pandas as pd

@@ -90,7 +91,7 @@ def convert2spark(data_frame: Optional[DataFrameLike]) -> Optional[SparkDataFrame]:
 def get_top_k(
     dataframe: SparkDataFrame,
     partition_by_col: Column,
-    order_by_col:
+    order_by_col: list[Column],
     k: int,
 ) -> SparkDataFrame:
     """

@@ -393,7 +394,7 @@ def horizontal_explode(
     data_frame: SparkDataFrame,
     column_to_explode: str,
     prefix: str,
-    other_columns:
+    other_columns: list[Column],
 ) -> SparkDataFrame:
     """
     Transform a column with an array of values into separate columns.

@@ -509,7 +510,7 @@ def unpersist_if_exists(dataframe: Optional[SparkDataFrame]) -> None:
 def join_with_col_renaming(
     left: SparkDataFrame,
     right: SparkDataFrame,
-    on_col_name: Union[str,
+    on_col_name: Union[str, list],
     how: str = "inner",
     suffix="join",
 ) -> SparkDataFrame:

@@ -698,7 +699,7 @@ def filter_cold(
     df: Optional[SparkDataFrame],
     warm_df: SparkDataFrame,
     col_name: str,
-) ->
+) -> tuple[int, Optional[SparkDataFrame]]:
     """
     Filter out new user/item ids absent in `warm_df`.
     Return number of new users/items and filtered dataframe.
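For the reworked `filter_cold` signature above, a hedged usage sketch based only on the declaration and docstring shown (number of new ids plus the filtered frame); the Spark session, toy data, and column name are assumptions for illustration:

```python
# Usage sketch for filter_cold(df, warm_df, col_name) -> (n_new_ids, filtered_df).
from pyspark.sql import SparkSession

from replay.utils.spark_utils import filter_cold

spark = SparkSession.builder.master("local[1]").getOrCreate()
train = spark.createDataFrame([(1,), (2,)], ["user_id"])
test = spark.createDataFrame([(1,), (3,)], ["user_id"])

n_cold, warm_test = filter_cold(test, warm_df=train, col_name="user_id")
print(n_cold)  # ids present in test but absent from train
```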
replay/utils/types.py
CHANGED
@@ -1,5 +1,6 @@
+from collections.abc import Iterable
 from importlib.util import find_spec
-from typing import
+from typing import Union
 
 from pandas import DataFrame as PandasDataFrame
 from polars import DataFrame as PolarsDataFrame

@@ -46,3 +47,4 @@ ANN_AVAILABLE = all(
 )
 OPENVINO_AVAILABLE = TORCH_AVAILABLE and find_spec("onnx") and find_spec("openvino")
 OPTUNA_AVAILABLE = find_spec("onnx") and find_spec("openvino")
 OPTUNA_AVAILABLE = find_spec("optuna")
+LIGHTFM_AVAILABLE = find_spec("lightfm")
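`LIGHTFM_AVAILABLE` follows the same availability-flag convention as the neighbouring `OPTUNA_AVAILABLE` and `OPENVINO_AVAILABLE`: `importlib.util.find_spec` returns a module spec (truthy) when a package is importable and `None` otherwise. A small sketch of how such a flag is typically consumed; the guarded import is an assumption for illustration, not replay-rec code:

```python
# Probe for an optional dependency without importing it.
from importlib.util import find_spec

LIGHTFM_AVAILABLE = find_spec("lightfm") is not None

if LIGHTFM_AVAILABLE:
    import lightfm  # noqa: F401  # only import when the optional extra is installed
else:
    print("lightfm is not installed; experimental LightFM support is unavailable")
```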
{replay_rec-0.20.0.dist-info → replay_rec-0.20.1.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: replay-rec
-Version: 0.20.0
+Version: 0.20.1
 Summary: RecSys Library
 License-Expression: Apache-2.0
 License-File: LICENSE

@@ -245,6 +245,12 @@ pip install openvino onnx
 pip install hnswlib fixed-install-nmslib
 ```
 
+4) (Experimental) LightFM model support:
+```bash
+pip install lightfm
+```
+> **_NOTE_** : LightFM is not officially supported for Python 3.12 due to discontinued maintenance of the library. If you wish to install it locally, you'll have to use a patched fork of LightFM, such as the [one used internally](https://github.com/daviddavo/lightfm).
+
 
 <a name="examples"></a>
 ## 📑 Resources
{replay_rec-0.20.0.dist-info → replay_rec-0.20.1.dist-info}/RECORD
CHANGED

@@ -1,45 +1,45 @@
-replay/__init__.py,sha256=
+replay/__init__.py,sha256=lcQXZRcrHeoQ5A4N4ZuFAUfXTICM00LWZ-6_KwdDxmM,225
 replay/data/__init__.py,sha256=g5bKRyF76QL_BqlED-31RnS8pBdcyj9loMsx5vAG_0E,301
-replay/data/dataset.py,sha256=
+replay/data/dataset.py,sha256=yQDc8lfphQYfHpm_T1MhnG8_GyM4ONyxJoFc1rUgdJ8,30755
 replay/data/dataset_utils/__init__.py,sha256=9wUvG8ZwGUvuzLU4zQI5FDcH0WVVo5YLN2ey3DterP0,55
-replay/data/dataset_utils/dataset_label_encoder.py,sha256=
+replay/data/dataset_utils/dataset_label_encoder.py,sha256=bxuJPhShFZBok7bQZYGNMV1etCLNTJUpyKO5MIwWack,9823
 replay/data/nn/__init__.py,sha256=nj2Ep-tduuQkc-TnBkvN8-rDnFbcWO2oZrfcXl9M3C8,1122
-replay/data/nn/schema.py,sha256=
-replay/data/nn/sequence_tokenizer.py,sha256=
-replay/data/nn/sequential_dataset.py,sha256=
-replay/data/nn/torch_sequential_dataset.py,sha256=
-replay/data/nn/utils.py,sha256=
-replay/data/schema.py,sha256=
+replay/data/nn/schema.py,sha256=h1KgaNV-hgN9Vpt24c92EmeMpm_8W0s9a2M0wLxJHYk,17101
+replay/data/nn/sequence_tokenizer.py,sha256=_9fBF-84jdn8Pa3pFKIr6prUjNYCc6BVzwRl9VSleKQ,37419
+replay/data/nn/sequential_dataset.py,sha256=qthp87SQ44VpgoH3RKsqm6CxCeQyApn58l7_16txAZM,11303
+replay/data/nn/torch_sequential_dataset.py,sha256=QSh4IM2vzAF095_ZMC1gMqZj9slHXos9gfx_R_DlpGM,11545
+replay/data/nn/utils.py,sha256=Ic3G4yZRIzBYXLmwP1VstlZXPNR7AYGCc5EyZAERp5c,3297
+replay/data/schema.py,sha256=JmYLCrNgBS5oq4O_PT724Gr1pDurHEykcqV8Xaj0XTw,15922
 replay/data/spark_schema.py,sha256=4o0Kn_fjwz2-9dBY3q46F9PL0F3E7jdVpIlX7SG3OZI,1111
 replay/metrics/__init__.py,sha256=j0PGvUehaPEZMNo9SQwJsnvzrS4bam9eHrRMQFLnMjY,2813
-replay/metrics/base_metric.py,sha256=
-replay/metrics/categorical_diversity.py,sha256=
-replay/metrics/coverage.py,sha256=
+replay/metrics/base_metric.py,sha256=ejtwFHktN4J8Fi1HIM3w0zlMAd8nO7-XpFi2D1iHXUQ,16010
+replay/metrics/categorical_diversity.py,sha256=3tp8n457Ob4gjM-UTB5N19u9WAF7fLDkWKk-Mth-Vzc,10769
+replay/metrics/coverage.py,sha256=e6vPItrRlI-mLNuOT5uoo5lMAAzkYGKZRxvupi21dMk,8528
 replay/metrics/descriptors.py,sha256=BHORyGKfJgPeUjgLO0u2urSTe16UQbb-HHh8soqnwDE,3893
-replay/metrics/experiment.py,sha256=
-replay/metrics/hitrate.py,sha256=
-replay/metrics/map.py,sha256=
-replay/metrics/mrr.py,sha256=
-replay/metrics/ndcg.py,sha256=
-replay/metrics/novelty.py,sha256=
-replay/metrics/offline_metrics.py,sha256=
-replay/metrics/precision.py,sha256=
-replay/metrics/recall.py,sha256=
-replay/metrics/rocauc.py,sha256=
-replay/metrics/surprisal.py,sha256=
-replay/metrics/torch_metrics_builder.py,sha256=
-replay/metrics/unexpectedness.py,sha256=
+replay/metrics/experiment.py,sha256=6Sw8PyItn3E2R-BBa_YwrmtBV3n0uAGHHOvkhHYgMz4,8125
+replay/metrics/hitrate.py,sha256=LcOJLMs3_Dq4_pbKx95qdCdjGrX52dyWyuWUFXCyaDw,2314
+replay/metrics/map.py,sha256=dIZcmUxd2XnNC7d_d7gmq0cjNaI1hlNMaJTSHGCokQE,2572
+replay/metrics/mrr.py,sha256=qM8tVMSoyYR-kTx0mnBGppoC53SxNlZKm7JKMUmSv9U,2163
+replay/metrics/ndcg.py,sha256=izajmD243ZIK3KLm9M-NtLwxb9N3Ktj58__AAfwF6Vc,3110
+replay/metrics/novelty.py,sha256=j3p1fbUVi2QQgEre42jeQx73PYYDUhy5gYlrL4BL5b8,5488
+replay/metrics/offline_metrics.py,sha256=f_U4Tk3Ke5sR0_OYvoE2_nD6wrOCveg3DM3B9pStVUI,20454
+replay/metrics/precision.py,sha256=DRlsgY_b4bJCOSZjCA58N41REMiDt-dbagRSXxfXyvY,2256
+replay/metrics/recall.py,sha256=fzpASDiH88zcpXJZTbStQ3nuzzSdhd9k1wjF27rM4wc,2447
+replay/metrics/rocauc.py,sha256=1vaVEK7DQTL8BX-i7A64hTFWyO38aNycscPGrdWKwbA,3282
+replay/metrics/surprisal.py,sha256=HkmYrOuw3jydxFrkidjdcpAcKz2DeOnMsKqwB2g9pwY,7526
+replay/metrics/torch_metrics_builder.py,sha256=jccdTNXJrwiWmBoD9cB3ilIn-upKhR6toAfKTG5T2Mc,13855
+replay/metrics/unexpectedness.py,sha256=LSi-z50l3_yrvLnmToHQzm6Ygf2QpNt_zhk6jdg7QUo,6882
 replay/models/__init__.py,sha256=kECYluQZ83zRUWaHVvnt7Tg3BerHrJy9v8XfRxsqyYY,1123
-replay/models/als.py,sha256=
-replay/models/association_rules.py,sha256=
-replay/models/base_neighbour_rec.py,sha256=
-replay/models/base_rec.py,sha256=
-replay/models/cat_pop_rec.py,sha256=
+replay/models/als.py,sha256=1MFAbcx64tv0MX1wE9CM1NxKD3F3ZDhZUrmt6dvHu74,6220
+replay/models/association_rules.py,sha256=shBNsKjlii0YK-XA6bSl5Ov0ZFTnjxZbgKJU9PFYptY,14507
+replay/models/base_neighbour_rec.py,sha256=SdGb2ejpYjHmxFNTk5zwEo0RWdfPAj1vKGP_oj7IrQo,7783
+replay/models/base_rec.py,sha256=aNIEbSy8G5q92NOpDlSJbp0Z-lAkazFLa9eDAajl1wI,56067
+replay/models/cat_pop_rec.py,sha256=ed1X1PDQY41hFJ1cO3Q5OWy0rXhV5_n23hJ-QHWONtE,11968
 replay/models/cluster.py,sha256=9JcpGnbfgFa4UsyxPAa4WMuJFa3rsuAxiKoy-s_UfyE,4970
 replay/models/common.py,sha256=rFmfwwzWCWED2HaDVuSN7ZUAgaNPGPawUudgn4IApbo,2121
 replay/models/extensions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 replay/models/extensions/ann/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-replay/models/extensions/ann/ann_mixin.py,sha256=
+replay/models/extensions/ann/ann_mixin.py,sha256=Ua1fuwrvtISNDQ8iPV-ln8S1LDKz8-rIU2UYsMExAiU,7782
 replay/models/extensions/ann/entities/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 replay/models/extensions/ann/entities/base_hnsw_param.py,sha256=5GRdcQj4-zhNXfJ7ko2WHGHgRuXCzSHCRcRxljl1V4c,776
 replay/models/extensions/ann/entities/hnswlib_param.py,sha256=j3V4JXM_yfR6s2TjYakIXMg-zS1-MrP6an930DEIWGM,2104

@@ -48,8 +48,8 @@ replay/models/extensions/ann/index_builders/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 replay/models/extensions/ann/index_builders/base_index_builder.py,sha256=Ul25G0FaNLOXUjrDXxZDTg7tLXlv1N6wR8kWjWICtZ0,2110
 replay/models/extensions/ann/index_builders/driver_hnswlib_index_builder.py,sha256=U8-3lRahyWmWkZ7tYuO-Avd1jX-lGh7JukC140wJ-WQ,1600
 replay/models/extensions/ann/index_builders/driver_nmslib_index_builder.py,sha256=1NLWyAJGYgp46uUBhUYQyd0stmG6DhLh7U4JEne5TFw,1308
-replay/models/extensions/ann/index_builders/executor_hnswlib_index_builder.py,sha256=
-replay/models/extensions/ann/index_builders/executor_nmslib_index_builder.py,sha256=
+replay/models/extensions/ann/index_builders/executor_hnswlib_index_builder.py,sha256=cf3LhBCRRN-lBYGlJbv8vnY-KVeHAleN5cVjvd58Ibs,2476
+replay/models/extensions/ann/index_builders/executor_nmslib_index_builder.py,sha256=0DPJ3WAt0cZ5dmtZv87fmMEgYXWf8rM35f7CA_DgWZY,2618
 replay/models/extensions/ann/index_builders/nmslib_index_builder_mixin.py,sha256=AIkVnobesnTM5lrBSWf9gd0CySwFQ0vH_DjemfLS4Cs,1925
 replay/models/extensions/ann/index_inferers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 replay/models/extensions/ann/index_inferers/base_inferer.py,sha256=I39aqEc2somfndrCd-KC3XYZnYSrJ2hGpR9y6wO93NA,2524

@@ -67,73 +67,72 @@ replay/models/extensions/ann/index_stores/utils.py,sha256=6r2GP_EFCaCguolW857pb4
 replay/models/extensions/ann/utils.py,sha256=AgQvThi_DvEtakQeTno9hVZVWiWMFHKTjRcQ2wLa5vk,1222
 replay/models/kl_ucb.py,sha256=L6vC2KsTBTTx4ckmGhWybOiLa5Wt54N7cgl7jS2FQRg,6731
 replay/models/knn.py,sha256=HEiGHHQg9pV1_EIWZHfK-XD0BNAm1bj1c0ND9rYnj3k,8992
-replay/models/lin_ucb.py,sha256=
+replay/models/lin_ucb.py,sha256=iAR3PbbaQKqmisOKEx9ZyfpxnxcZomr6YauG4mvSakU,18800
 replay/models/nn/__init__.py,sha256=AT3o1qXaxUq4_QIGlcGuSs54ZpueOo-SbpZwuGI-6os,41
 replay/models/nn/loss/__init__.py,sha256=s3iO9QTZvLz_ony2b5K0hEmDmitrXQnAe9j6BRxLpR4,53
 replay/models/nn/loss/sce.py,sha256=p6LFtoYSY4j2pQh6Z7i6cEADCmRnvTgnb8EJXseRKKg,5637
 replay/models/nn/optimizer_utils/__init__.py,sha256=8MHln7CW54oACVUFKdZLjAf4bY83GcOMXpmL87gTnhI,178
-replay/models/nn/optimizer_utils/optimizer_factory.py,sha256=
+replay/models/nn/optimizer_utils/optimizer_factory.py,sha256=1wicKnya2xrwDaHhqygy1VqB8-3jPDhMM7zY2TJE4dY,2844
 replay/models/nn/sequential/__init__.py,sha256=CI2n0cxs_amqJrwBMq6n0Z_uBOu7CGXfagqvE4Jlmjw,128
 replay/models/nn/sequential/bert4rec/__init__.py,sha256=JfZqHOGxcvOkICl5cWmZbZhaKXpkIvua-Wj57VWWEhw,399
-replay/models/nn/sequential/bert4rec/dataset.py,sha256=
-replay/models/nn/sequential/bert4rec/lightning.py,sha256=
-replay/models/nn/sequential/bert4rec/model.py,sha256=
+replay/models/nn/sequential/bert4rec/dataset.py,sha256=xd5a-yn5I280Vwoy_KtasDjrvksFolJYp71nDEHNUNQ,10414
+replay/models/nn/sequential/bert4rec/lightning.py,sha256=_hP6_6E1SpGu6b_kiYEF4ZVhwKJ4sj_iPTo6loIvM0o,26546
+replay/models/nn/sequential/bert4rec/model.py,sha256=2Lqvfz7UBB_ArqNs92OD5dy4a1onR4S5dNZiMbZgAgk,17388
 replay/models/nn/sequential/callbacks/__init__.py,sha256=Q7mSZ_RB6iyD7QZaBL_NJ0uh8cRfgxq7gtPHbkSyhoo,282
-replay/models/nn/sequential/callbacks/prediction_callbacks.py,sha256=
-replay/models/nn/sequential/callbacks/validation_callback.py,sha256=
+replay/models/nn/sequential/callbacks/prediction_callbacks.py,sha256=lkYoTOpyt-Gy9wOgOgGlFshTJP24VsDcWm_6pY5Xmyg,9296
+replay/models/nn/sequential/callbacks/validation_callback.py,sha256=GcRWM_yVwRBRLTW1sYCy9_aNZ5C71hmJeGG61Yyh4vA,5812
 replay/models/nn/sequential/compiled/__init__.py,sha256=eSVcCaUH5cDJQRbC7K99X7uMNR-Z-KR4TmYOGKWWJCI,531
-replay/models/nn/sequential/compiled/base_compiled_model.py,sha256=
+replay/models/nn/sequential/compiled/base_compiled_model.py,sha256=vOL-9jodvSNc7N32V4lTjRTCNM-tOPAxfMyZPENqsFA,10231
 replay/models/nn/sequential/compiled/bert4rec_compiled.py,sha256=Z6nfmdT70Wi-j7_CDFJ88iNCp1gdQleg1WkfHp0hb4s,6400
 replay/models/nn/sequential/compiled/sasrec_compiled.py,sha256=qUaAwQOsBCstOG3RBlj_pJpD8BHmCpLZWCiPBlFVvT4,5856
 replay/models/nn/sequential/postprocessors/__init__.py,sha256=89LGzkNHukcuC2-rfpiz7vmv1zyk6MNY-8zaXrvtn0M,164
-replay/models/nn/sequential/postprocessors/_base.py,sha256=
-replay/models/nn/sequential/postprocessors/postprocessors.py,sha256=
+replay/models/nn/sequential/postprocessors/_base.py,sha256=Q_SIYKG8G3U03IEK1dtlW1zJI300pOcWQYuMpkY0_nc,1111
+replay/models/nn/sequential/postprocessors/postprocessors.py,sha256=oijLByxuzegVmWZS-qRVhdO7ihqHer6SSGTFa8zX7I8,7810
 replay/models/nn/sequential/sasrec/__init__.py,sha256=c6130lRpPkcbuGgkM7slagBIgH7Uk5zUtSzFDEwAsik,250
-replay/models/nn/sequential/sasrec/dataset.py,sha256=
-replay/models/nn/sequential/sasrec/lightning.py,sha256=
-replay/models/nn/sequential/sasrec/model.py,sha256=
+replay/models/nn/sequential/sasrec/dataset.py,sha256=L_LeRWqPc__390j8NWVskboS0NqbveIkLwFclcB4oDw,7189
+replay/models/nn/sequential/sasrec/lightning.py,sha256=oScUyB8RU8N4MqWe6kAoWG0JW6Tkb2ldG_jdGFZgA7A,25060
+replay/models/nn/sequential/sasrec/model.py,sha256=8kFovyPWqgQ0hmD3gckRjW7-hLBerl3bgYXCk4PYn0o,27656
 replay/models/optimization/__init__.py,sha256=N8xCuzu0jQGwHrIBjuTRf-ZcZuBJ6FB0d9C5a7izJQU,338
 replay/models/optimization/optuna_mixin.py,sha256=pKu-Vw9l2LsDycubpdJiLkC1eE4pKrDG0T2lhUgRUB4,11960
 replay/models/optimization/optuna_objective.py,sha256=UHWOJwBngPA3IRz9yAMEWPg00oyb7Wq9PXuRPYHIiLE,7538
 replay/models/pop_rec.py,sha256=Ju9y2rU2vW_jFU9-W15fbbr5_ZzYGihSjSxsqKsAf0Q,4964
 replay/models/query_pop_rec.py,sha256=UNsHtf3eQpJom73ZmEO5us4guI4SnCLJYTfuUpRgqes,4086
 replay/models/random_rec.py,sha256=9SC012_X3sNzrAjDG1CPGhjisZb6gnv4VCW7yIMSNpk,8066
-replay/models/slim.py,sha256=
+replay/models/slim.py,sha256=OAdTS64bObZujzHkq8vfP1kkoLMSWxk1KLg6lCCA0N8,4551
 replay/models/thompson_sampling.py,sha256=gcjlVl1mPiEVt70y8frA762O-eCZzd3SVg1lnDRCEHk,1939
-replay/models/ucb.py,sha256=
+replay/models/ucb.py,sha256=b2qFgvOAZcyv5triPk18duqF_jt-ty7mypenjRLNWwQ,6952
 replay/models/wilson.py,sha256=o7aUWjq3648dAfgGBoWD5Gu-HzdyobPMaH2lzCLijiA,4558
-replay/models/word2vec.py,sha256=
+replay/models/word2vec.py,sha256=atfj6GjR_L-TdurRFr1yi7B3BicJ3ZdFxixW9RfojJs,8882
 replay/preprocessing/__init__.py,sha256=c6wFPAc6lATyp0lE-ZDjHMsXyEMPKX7Usuqylv6H5XQ,597
 replay/preprocessing/converter.py,sha256=JQ-4u5x0eXtswl1iH-bZITBXQov1nebnZ6XcvpD8Twk,4417
-replay/preprocessing/discretizer.py,sha256=
-replay/preprocessing/filters.py,sha256=
-replay/preprocessing/history_based_fp.py,sha256=
-replay/preprocessing/label_encoder.py,sha256=
+replay/preprocessing/discretizer.py,sha256=jzYqvoSVmiL-oS-ri9Om0vSDoU8bCQimjUoe7FiPfLU,27024
+replay/preprocessing/filters.py,sha256=C0zR4LOnGJsMzowuWfaTPR457RppgLZRhcZFV1WkS7o,45845
+replay/preprocessing/history_based_fp.py,sha256=oEu1CkCz7xcGbPdSTHfhTe1NimnFo50Arn8qngRBgE8,18702
+replay/preprocessing/label_encoder.py,sha256=eWsPa5mZq7_9SDxkaiI8mpCfIKTKNr-tlNmfqEunnTk,41432
 replay/preprocessing/sessionizer.py,sha256=G6i0K3FwqtweRxvcSYraJ-tBWAT2HnV-bWHHlIZJF-s,12217
 replay/scenarios/__init__.py,sha256=XXAKEQPTLlve-0O6NPwFgahFrb4oGcIq3HaYaaGxG2E,94
-replay/scenarios/fallback.py,sha256=
+replay/scenarios/fallback.py,sha256=dO3s9jqYup4rbgMaY6Z6HGm1r7SXkm7jOvNZDr5zm_U,7138
 replay/splitters/__init__.py,sha256=DnqVMelrzLwR8fGQgcWN_8FipGs8T4XGSPOMW-L_x2g,454
-replay/splitters/base_splitter.py,sha256=
-replay/splitters/cold_user_random_splitter.py,sha256=
-replay/splitters/k_folds.py,sha256=
-replay/splitters/last_n_splitter.py,sha256=
-replay/splitters/new_users_splitter.py,sha256=
-replay/splitters/random_splitter.py,sha256=
-replay/splitters/ratio_splitter.py,sha256=
-replay/splitters/time_splitter.py,sha256=
-replay/splitters/two_stage_splitter.py,sha256=
-replay/utils/__init__.py,sha256=
-replay/utils/common.py,sha256=
+replay/splitters/base_splitter.py,sha256=zvYVEHBYrK8Y2qPv3kYachfLFwR9-kUAiU1UJSNGS8A,7749
+replay/splitters/cold_user_random_splitter.py,sha256=32VgAHiwk9Emkofu1KqwGZrrFiyrYtSQ3YPdt5p_XoQ,4423
+replay/splitters/k_folds.py,sha256=RDDL3gE6M5qfK5Ig-pxxJeq3O4uxsWJjLFQRRzQ2Ssg,6211
+replay/splitters/last_n_splitter.py,sha256=hMWIGYFg17LioT08VBXut5Ic-w9oXsKd739cy2xuwYs,15368
+replay/splitters/new_users_splitter.py,sha256=NksAdl_wL9zwHj3cY5NqrrnkOajgyUDloSsRZ9HUE48,9160
+replay/splitters/random_splitter.py,sha256=0DO0qulT0jp_GXswmFh3BMJ7utS-z9e-r5jIrmTKGC4,2989
+replay/splitters/ratio_splitter.py,sha256=rFWN-nKBYx1qKrmtYzjYf08DWFiKOCo5ZRUz-NHJFfs,17506
+replay/splitters/time_splitter.py,sha256=0ZAMK26b--1wjrfzCuNVBh7gMPTa8SGf4LMEgACiUxA,9013
+replay/splitters/two_stage_splitter.py,sha256=8Zn6BTJmZg04CD4l2jmil2dEu6xtglJaSS5mkotIXRc,17823
+replay/utils/__init__.py,sha256=3Skc9bUISEPPMMxdUCCT_S1q-i7cAT3KT0nExe-VMrw,343
+replay/utils/common.py,sha256=92MTG51WpeEQJ2gu-WvdNe4Fmqm8ze-y1VNIAHW81jQ,5358
 replay/utils/dataframe_bucketizer.py,sha256=LipmBBQkdkLGroZpbP9i7qvTombLdMxo2dUUys1m5OY,3748
 replay/utils/distributions.py,sha256=UuhaC9HI6HnUXW97fEd-TsyDk4JT8t7k1T_6l5FpOMs,1203
 replay/utils/model_handler.py,sha256=6WRyd39B-UXTtKTHWD_ssYN1vMmkjd417bwKb50uqJY,5754
-replay/utils/session_handler.py,sha256=
-replay/utils/spark_utils.py,sha256=
+replay/utils/session_handler.py,sha256=fQo2wseow8yuzKnEXT-aYAXcQIgRbTTXp0v7g1VVi0w,5138
+replay/utils/spark_utils.py,sha256=GbRp-MuUoO3Pc4chFvlmo9FskSlRLeNlC3Go5pEJ6Ok,27411
 replay/utils/time.py,sha256=J8asoQBytPcNw-BLGADYIsKeWhIoN1H5hKiX9t2AMqo,9376
-replay/utils/types.py,sha256=
-
-replay_rec-0.20.
-replay_rec-0.20.
-replay_rec-0.20.
-replay_rec-0.20.
-replay_rec-0.20.0.dist-info/RECORD,,
+replay/utils/types.py,sha256=rD9q9CqEXgF4yy512Hv2nXclvwcnfodOnhBZ1HSUI4c,1260
+replay_rec-0.20.1.dist-info/METADATA,sha256=gHDb26sYFSRyjHopOesLiM_Dtvf4b36mXdpULv2vx8o,13562
+replay_rec-0.20.1.dist-info/WHEEL,sha256=zp0Cn7JsFoX2ATtOhtaFYIiE2rmFAD4OcMhtUki8W3U,88
+replay_rec-0.20.1.dist-info/licenses/LICENSE,sha256=rPmcA7UrHxBChEAAlJyE24qUWKKl9yLQXxFsKeg_LX4,11344
+replay_rec-0.20.1.dist-info/licenses/NOTICE,sha256=k0bo4KHiHLRax5K3XKTTrf2Fi8V91mJ-R3FMdh6Reg0,2002
+replay_rec-0.20.1.dist-info/RECORD,,
replay/utils/warnings.py
DELETED
@@ -1,26 +0,0 @@
-import functools
-import warnings
-from collections.abc import Callable
-from typing import Any, Optional
-
-
-def deprecation_warning(message: Optional[str] = None) -> Callable[..., Any]:
-    """
-    Decorator that throws deprecation warnings.
-
-    :param message: message to deprecation warning without func name.
-    """
-    base_msg = "will be deprecated in future versions."
-
-    def decorator(func: Callable[..., Any]) -> Callable[..., Any]:
-        @functools.wraps(func)
-        def wrapper(*args: Any, **kwargs: Any) -> Any:
-            msg = f"{func.__qualname__} {message if message else base_msg}"
-            warnings.simplefilter("always", DeprecationWarning)  # turn off filter
-            warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
-            warnings.simplefilter("default", DeprecationWarning)  # reset filter
-            return func(*args, **kwargs)
-
-        return wrapper
-
-    return decorator
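The deleted module provided a `deprecation_warning` decorator that temporarily forced `DeprecationWarning` to be shown, warned with the wrapped function's qualified name, and restored the default filter. A hedged sketch of reproducing that behaviour inline after the removal; `old_entry_point` is a hypothetical function, not part of replay-rec, and this is not the library's actual replacement:

```python
# Inline equivalent of the removed decorator's behaviour (illustrative only).
import warnings


def old_entry_point() -> None:  # hypothetical deprecated function
    warnings.simplefilter("always", DeprecationWarning)  # make the warning visible
    warnings.warn(
        "old_entry_point will be deprecated in future versions.",
        category=DeprecationWarning,
        stacklevel=2,
    )
    warnings.simplefilter("default", DeprecationWarning)  # restore the default filter
```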
{replay_rec-0.20.0.dist-info → replay_rec-0.20.1.dist-info}/WHEEL
File without changes

{replay_rec-0.20.0.dist-info → replay_rec-0.20.1.dist-info}/licenses/LICENSE
File without changes

{replay_rec-0.20.0.dist-info → replay_rec-0.20.1.dist-info}/licenses/NOTICE
File without changes