replay-rec 0.20.2__py3-none-any.whl → 0.20.3rc0__py3-none-any.whl

Files changed (62)
  1. replay/__init__.py +1 -1
  2. replay/data/nn/sequential_dataset.py +8 -2
  3. replay/experimental/__init__.py +0 -0
  4. replay/experimental/metrics/__init__.py +62 -0
  5. replay/experimental/metrics/base_metric.py +603 -0
  6. replay/experimental/metrics/coverage.py +97 -0
  7. replay/experimental/metrics/experiment.py +175 -0
  8. replay/experimental/metrics/hitrate.py +26 -0
  9. replay/experimental/metrics/map.py +30 -0
  10. replay/experimental/metrics/mrr.py +18 -0
  11. replay/experimental/metrics/ncis_precision.py +31 -0
  12. replay/experimental/metrics/ndcg.py +49 -0
  13. replay/experimental/metrics/precision.py +22 -0
  14. replay/experimental/metrics/recall.py +25 -0
  15. replay/experimental/metrics/rocauc.py +49 -0
  16. replay/experimental/metrics/surprisal.py +90 -0
  17. replay/experimental/metrics/unexpectedness.py +76 -0
  18. replay/experimental/models/__init__.py +50 -0
  19. replay/experimental/models/admm_slim.py +257 -0
  20. replay/experimental/models/base_neighbour_rec.py +200 -0
  21. replay/experimental/models/base_rec.py +1386 -0
  22. replay/experimental/models/base_torch_rec.py +234 -0
  23. replay/experimental/models/cql.py +454 -0
  24. replay/experimental/models/ddpg.py +932 -0
  25. replay/experimental/models/dt4rec/__init__.py +0 -0
  26. replay/experimental/models/dt4rec/dt4rec.py +189 -0
  27. replay/experimental/models/dt4rec/gpt1.py +401 -0
  28. replay/experimental/models/dt4rec/trainer.py +127 -0
  29. replay/experimental/models/dt4rec/utils.py +264 -0
  30. replay/experimental/models/extensions/spark_custom_models/__init__.py +0 -0
  31. replay/experimental/models/extensions/spark_custom_models/als_extension.py +792 -0
  32. replay/experimental/models/hierarchical_recommender.py +331 -0
  33. replay/experimental/models/implicit_wrap.py +131 -0
  34. replay/experimental/models/lightfm_wrap.py +303 -0
  35. replay/experimental/models/mult_vae.py +332 -0
  36. replay/experimental/models/neural_ts.py +986 -0
  37. replay/experimental/models/neuromf.py +406 -0
  38. replay/experimental/models/scala_als.py +293 -0
  39. replay/experimental/models/u_lin_ucb.py +115 -0
  40. replay/experimental/nn/data/__init__.py +1 -0
  41. replay/experimental/nn/data/schema_builder.py +102 -0
  42. replay/experimental/preprocessing/__init__.py +3 -0
  43. replay/experimental/preprocessing/data_preparator.py +839 -0
  44. replay/experimental/preprocessing/padder.py +229 -0
  45. replay/experimental/preprocessing/sequence_generator.py +208 -0
  46. replay/experimental/scenarios/__init__.py +1 -0
  47. replay/experimental/scenarios/obp_wrapper/__init__.py +8 -0
  48. replay/experimental/scenarios/obp_wrapper/obp_optuna_objective.py +74 -0
  49. replay/experimental/scenarios/obp_wrapper/replay_offline.py +261 -0
  50. replay/experimental/scenarios/obp_wrapper/utils.py +85 -0
  51. replay/experimental/scenarios/two_stages/__init__.py +0 -0
  52. replay/experimental/scenarios/two_stages/reranker.py +117 -0
  53. replay/experimental/scenarios/two_stages/two_stages_scenario.py +757 -0
  54. replay/experimental/utils/__init__.py +0 -0
  55. replay/experimental/utils/logger.py +24 -0
  56. replay/experimental/utils/model_handler.py +186 -0
  57. replay/experimental/utils/session_handler.py +44 -0
  58. {replay_rec-0.20.2.dist-info → replay_rec-0.20.3rc0.dist-info}/METADATA +11 -17
  59. {replay_rec-0.20.2.dist-info → replay_rec-0.20.3rc0.dist-info}/RECORD +62 -7
  60. {replay_rec-0.20.2.dist-info → replay_rec-0.20.3rc0.dist-info}/WHEEL +0 -0
  61. {replay_rec-0.20.2.dist-info → replay_rec-0.20.3rc0.dist-info}/licenses/LICENSE +0 -0
  62. {replay_rec-0.20.2.dist-info → replay_rec-0.20.3rc0.dist-info}/licenses/NOTICE +0 -0
replay/experimental/models/extensions/spark_custom_models/als_extension.py
@@ -0,0 +1,792 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+# Modifications copyright (c) 2023 Azamat G.
+#
+
+import contextlib
+import sys
+
+from replay.utils import PYSPARK_AVAILABLE
+
+if PYSPARK_AVAILABLE:
+    from pyspark import keyword_only, since
+    from pyspark.ml.common import inherit_doc
+    from pyspark.ml.param import Param, Params, TypeConverters
+    from pyspark.ml.param.shared import (
+        HasBlockSize,
+        HasCheckpointInterval,
+        HasMaxIter,
+        HasPredictionCol,
+        HasRegParam,
+        HasSeed,
+    )
+    from pyspark.ml.util import JavaMLReadable, JavaMLReader, JavaMLWritable, MLReadable, _jvm
+    from pyspark.ml.wrapper import JavaEstimator, JavaModel, JavaParams
+
+__all__ = ["ALS", "ALSModel"]
+
+
+class ALSModelJavaMLReadable(MLReadable):
+    """
+    (Private) Mixin for instances that provide JavaMLReader.
+    """
+
+    @classmethod
+    def read(cls):
+        """Returns an MLReader instance for this class."""
+        return ALSModelJavaMLReader(cls)
+
+
+class ALSModelJavaMLReader(JavaMLReader):
+    """
+    Custom reader that sets the Java class explicitly.
+    """
+
+    @classmethod
+    def _load_java_obj(cls, clazz):  # noqa: ARG003
+        """Load the peer Java object of the ML instance."""
+        java_class = "org.apache.spark.ml.recommendation.replay.ReplayALSModel"
+        java_obj = _jvm()
+        for name in java_class.split("."):
+            java_obj = getattr(java_obj, name)
+        return java_obj
+
+
+@inherit_doc
+class _ALSModelParams(HasPredictionCol, HasBlockSize):
+    """
+    Params for :py:class:`ALS` and :py:class:`ALSModel`.
+
+    .. versionadded:: 3.0.0
+    """
+
+    userCol = Param(
+        Params._dummy(),
+        "userCol",
+        "column name for user ids. Ids must be within " + "the integer value range.",
+        typeConverter=TypeConverters.toString,
+    )
+    itemCol = Param(
+        Params._dummy(),
+        "itemCol",
+        "column name for item ids. Ids must be within " + "the integer value range.",
+        typeConverter=TypeConverters.toString,
+    )
+    coldStartStrategy = Param(
+        Params._dummy(),
+        "coldStartStrategy",
+        "strategy for dealing with "
+        "unknown or new users/items at prediction time. This may be useful "
+        "in cross-validation or production scenarios, for handling "
+        "user/item ids the model has not seen in the training data. "
+        "Supported values: 'nan', 'drop'.",
+        typeConverter=TypeConverters.toString,
+    )
+
+    def __init__(self, *args):
+        super().__init__(*args)
+        self._setDefault(blockSize=4096)
+
+    @since("1.4.0")
+    def getUserCol(self):
+        """
+        Gets the value of userCol or its default value.
+        """
+        return self.getOrDefault(self.userCol)
+
+    @since("1.4.0")
+    def getItemCol(self):
+        """
+        Gets the value of itemCol or its default value.
+        """
+        return self.getOrDefault(self.itemCol)
+
+    @since("2.2.0")
+    def getColdStartStrategy(self):
+        """
+        Gets the value of coldStartStrategy or its default value.
+        """
+        return self.getOrDefault(self.coldStartStrategy)
+
+
+@inherit_doc
+class _ALSParams(_ALSModelParams, HasMaxIter, HasRegParam, HasCheckpointInterval, HasSeed):
+    """
+    Params for :py:class:`ALS`.
+
+    .. versionadded:: 3.0.0
+    """
+
+    rank = Param(Params._dummy(), "rank", "rank of the factorization", typeConverter=TypeConverters.toInt)
+    numUserBlocks = Param(Params._dummy(), "numUserBlocks", "number of user blocks", typeConverter=TypeConverters.toInt)
+    numItemBlocks = Param(Params._dummy(), "numItemBlocks", "number of item blocks", typeConverter=TypeConverters.toInt)
+    implicitPrefs = Param(
+        Params._dummy(), "implicitPrefs", "whether to use implicit preference", typeConverter=TypeConverters.toBoolean
+    )
+    alpha = Param(Params._dummy(), "alpha", "alpha for implicit preference", typeConverter=TypeConverters.toFloat)
+
+    ratingCol = Param(Params._dummy(), "ratingCol", "column name for ratings", typeConverter=TypeConverters.toString)
+    nonnegative = Param(
+        Params._dummy(),
+        "nonnegative",
+        "whether to use nonnegative constraint for least squares",
+        typeConverter=TypeConverters.toBoolean,
+    )
+    intermediateStorageLevel = Param(
+        Params._dummy(),
+        "intermediateStorageLevel",
+        "StorageLevel for intermediate datasets. Cannot be 'NONE'.",
+        typeConverter=TypeConverters.toString,
+    )
+    finalStorageLevel = Param(
+        Params._dummy(),
+        "finalStorageLevel",
+        "StorageLevel for ALS model factors.",
+        typeConverter=TypeConverters.toString,
+    )
+
+    def __init__(self, *args):
+        super().__init__(*args)
+        self._setDefault(
+            rank=10,
+            maxIter=10,
+            regParam=0.1,
+            numUserBlocks=10,
+            numItemBlocks=10,
+            implicitPrefs=False,
+            alpha=1.0,
+            userCol="user",
+            itemCol="item",
+            ratingCol="rating",
+            nonnegative=False,
+            checkpointInterval=10,
+            intermediateStorageLevel="MEMORY_AND_DISK",
+            finalStorageLevel="MEMORY_AND_DISK",
+            coldStartStrategy="nan",
+        )
+
+    @since("1.4.0")
+    def getRank(self):
+        """
+        Gets the value of rank or its default value.
+        """
+        return self.getOrDefault(self.rank)
+
+    @since("1.4.0")
+    def getNumUserBlocks(self):
+        """
+        Gets the value of numUserBlocks or its default value.
+        """
+        return self.getOrDefault(self.numUserBlocks)
+
+    @since("1.4.0")
+    def getNumItemBlocks(self):
+        """
+        Gets the value of numItemBlocks or its default value.
+        """
+        return self.getOrDefault(self.numItemBlocks)
+
+    @since("1.4.0")
+    def getImplicitPrefs(self):
+        """
+        Gets the value of implicitPrefs or its default value.
+        """
+        return self.getOrDefault(self.implicitPrefs)
+
+    @since("1.4.0")
+    def getAlpha(self):
+        """
+        Gets the value of alpha or its default value.
+        """
+        return self.getOrDefault(self.alpha)
+
+    @since("1.4.0")
+    def getRatingCol(self):
+        """
+        Gets the value of ratingCol or its default value.
+        """
+        return self.getOrDefault(self.ratingCol)
+
+    @since("1.4.0")
+    def getNonnegative(self):
+        """
+        Gets the value of nonnegative or its default value.
+        """
+        return self.getOrDefault(self.nonnegative)
+
+    @since("2.0.0")
+    def getIntermediateStorageLevel(self):
+        """
+        Gets the value of intermediateStorageLevel or its default value.
+        """
+        return self.getOrDefault(self.intermediateStorageLevel)
+
+    @since("2.0.0")
+    def getFinalStorageLevel(self):
+        """
+        Gets the value of finalStorageLevel or its default value.
+        """
+        return self.getOrDefault(self.finalStorageLevel)
+
+
+@inherit_doc
+class ALS(JavaEstimator, _ALSParams, JavaMLWritable, JavaMLReadable):
+    """
+    Alternating Least Squares (ALS) matrix factorization.
+
+    ALS attempts to estimate the ratings matrix `R` as the product of
+    two lower-rank matrices, `X` and `Y`, i.e. `X * Yt = R`. Typically
+    these approximations are called 'factor' matrices. The general
+    approach is iterative. During each iteration, one of the factor
+    matrices is held constant, while the other is solved for using least
+    squares. The newly-solved factor matrix is then held constant while
+    solving for the other factor matrix.
+
+    This is a blocked implementation of the ALS factorization algorithm
+    that groups the two sets of factors (referred to as "users" and
+    "products") into blocks and reduces communication by only sending
+    one copy of each user vector to each product block on each
+    iteration, and only for the product blocks that need that user's
+    feature vector. This is achieved by pre-computing some information
+    about the ratings matrix to determine the "out-links" of each user
+    (which blocks of products it will contribute to) and "in-link"
+    information for each product (which of the feature vectors it
+    receives from each user block it will depend on). This allows us to
+    send only an array of feature vectors between each user block and
+    product block, and have the product block find the users' ratings
+    and update the products based on these messages.
+
+    For implicit preference data, the algorithm used is based on
+    `"Collaborative Filtering for Implicit Feedback Datasets",
+    <https://doi.org/10.1109/ICDM.2008.22>`_, adapted for the blocked
+    approach used here.
+
+    Essentially instead of finding the low-rank approximations to the
+    rating matrix `R`, this finds the approximations for a preference
+    matrix `P` where the elements of `P` are 1 if r > 0 and 0 if r <= 0.
+    The ratings then act as 'confidence' values related to strength of
+    indicated user preferences rather than explicit ratings given to
+    items.
+
+    .. versionadded:: 1.4.0
+
+    Notes
+    -----
+    The input rating dataframe to the ALS implementation should be deterministic.
+    Nondeterministic data can cause failure during fitting ALS model.
+    For example, an order-sensitive operation like sampling after a repartition makes
+    dataframe output nondeterministic, like `df.repartition(2).sample(False, 0.5, 1618)`.
+    Checkpointing sampled dataframe or adding a sort before sampling can help make the
+    dataframe deterministic.
+
+    Examples
+    --------
+    >>> df = spark.createDataFrame(
+    ...     [(0, 0, 4.0), (0, 1, 2.0), (1, 1, 3.0), (1, 2, 4.0), (2, 1, 1.0), (2, 2, 5.0)],
+    ...     ["user", "item", "rating"])
+    >>> als = ALS(rank=10, seed=0)
+    >>> als.setMaxIter(5)
+    ALS...
+    >>> als.getMaxIter()
+    5
+    >>> als.setRegParam(0.1)
+    ALS...
+    >>> als.getRegParam()
+    0.1
+    >>> als.clear(als.regParam)
+    >>> model = als.fit(df)
+    >>> model.getBlockSize()
+    4096
+    >>> model.getUserCol()
+    'user'
+    >>> model.setUserCol("user")
+    ALSModel...
+    >>> model.getItemCol()
+    'item'
+    >>> model.setPredictionCol("newPrediction")
+    ALS...
+    >>> model.rank
+    10
+    >>> model.userFactors.orderBy("id").collect()
+    [Row(id=0, features=[...]), Row(id=1, ...), Row(id=2, ...)]
+    >>> test = spark.createDataFrame([(0, 2), (1, 0), (2, 0)], ["user", "item"])
+    >>> predictions = sorted(model.transform(test).collect(), key=lambda r: r[0])
+    >>> predictions[0]
+    Row(user=0, item=2, newPrediction=0.692910...)
+    >>> predictions[1]
+    Row(user=1, item=0, newPrediction=3.473569...)
+    >>> predictions[2]
+    Row(user=2, item=0, newPrediction=-0.899198...)
+    >>> user_recs = model.recommendForAllUsers(3)
+    >>> user_recs.where(user_recs.user == 0) \
+    ...     .select("recommendations.item", "recommendations.rating").collect()
+    [Row(item=[0, 1, 2], rating=[3.910..., 1.997..., 0.692...])]
+    >>> item_recs = model.recommendForAllItems(3)
+    >>> item_recs.where(item_recs.item == 2) \
+    ...     .select("recommendations.user", "recommendations.rating").collect()
+    [Row(user=[2, 1, 0], rating=[4.892..., 3.991..., 0.692...])]
+    >>> user_subset = df.where(df.user == 2)
+    >>> user_subset_recs = model.recommendForUserSubset(user_subset, 3)
+    >>> user_subset_recs.select("recommendations.item", "recommendations.rating").first()
+    Row(item=[2, 1, 0], rating=[4.892..., 1.076..., -0.899...])
+    >>> item_subset = df.where(df.item == 0)
+    >>> item_subset_recs = model.recommendForItemSubset(item_subset, 3)
+    >>> item_subset_recs.select("recommendations.user", "recommendations.rating").first()
+    Row(user=[0, 1, 2], rating=[3.910..., 3.473..., -0.899...])
+    >>> als_path = temp_path + "/als"
+    >>> als.save(als_path)
+    >>> als2 = ALS.load(als_path)
+    >>> als2.getMaxIter()
+    5
+    >>> model_path = temp_path + "/als_model"
+    >>> model.save(model_path)
+    >>> model2 = ALSModel.load(model_path)
+    >>> model.rank == model2.rank
+    True
+    >>> sorted(model.userFactors.collect()) == sorted(model2.userFactors.collect())
+    True
+    >>> sorted(model.itemFactors.collect()) == sorted(model2.itemFactors.collect())
+    True
+    >>> model.transform(test).take(1) == model2.transform(test).take(1)
+    True
+    """
+
+    @keyword_only
+    def __init__(
+        self,
+        *,
+        rank=10,
+        maxIter=10,
+        regParam=0.1,
+        numUserBlocks=10,
+        numItemBlocks=10,
+        implicitPrefs=False,
+        alpha=1.0,
+        userCol="user",
+        itemCol="item",
+        seed=None,
+        ratingCol="rating",
+        nonnegative=False,
+        checkpointInterval=10,
+        intermediateStorageLevel="MEMORY_AND_DISK",
+        finalStorageLevel="MEMORY_AND_DISK",
+        coldStartStrategy="nan",
+        blockSize=4096,
+    ):
+        """
+        __init__(self, \\*, rank=10, maxIter=10, regParam=0.1, numUserBlocks=10, \
+                 numItemBlocks=10, implicitPrefs=False, alpha=1.0, userCol="user", itemCol="item", \
+                 seed=None, ratingCol="rating", nonnegative=False, checkpointInterval=10, \
+                 intermediateStorageLevel="MEMORY_AND_DISK", \
+                 finalStorageLevel="MEMORY_AND_DISK", coldStartStrategy="nan", blockSize=4096)
+        """
+        super().__init__()
+        self._java_obj = self._new_java_obj("org.apache.spark.ml.recommendation.replay.ReplayALS", self.uid)
+        kwargs = self._input_kwargs
+        self.setParams(**kwargs)
+
+    @keyword_only
+    @since("1.4.0")
+    def setParams(
+        self,
+        *,
+        rank=10,
+        maxIter=10,
+        regParam=0.1,
+        numUserBlocks=10,
+        numItemBlocks=10,
+        implicitPrefs=False,
+        alpha=1.0,
+        userCol="user",
+        itemCol="item",
+        seed=None,
+        ratingCol="rating",
+        nonnegative=False,
+        checkpointInterval=10,
+        intermediateStorageLevel="MEMORY_AND_DISK",
+        finalStorageLevel="MEMORY_AND_DISK",
+        coldStartStrategy="nan",
+        blockSize=4096,
+    ):
+        """
+        setParams(self, \\*, rank=10, maxIter=10, regParam=0.1, numUserBlocks=10, \
+                  numItemBlocks=10, implicitPrefs=False, alpha=1.0, userCol="user", itemCol="item", \
+                  seed=None, ratingCol="rating", nonnegative=False, checkpointInterval=10, \
+                  intermediateStorageLevel="MEMORY_AND_DISK", \
+                  finalStorageLevel="MEMORY_AND_DISK", coldStartStrategy="nan", blockSize=4096)
+        Sets params for ALS.
+        """
+        kwargs = self._input_kwargs
+        return self._set(**kwargs)
+
+    def _create_model(self, java_model):
+        return ALSModel(java_model)
+
+    @since("1.4.0")
+    def setRank(self, value):
+        """
+        Sets the value of :py:attr:`rank`.
+        """
+        return self._set(rank=value)
+
+    @since("1.4.0")
+    def setNumUserBlocks(self, value):
+        """
+        Sets the value of :py:attr:`numUserBlocks`.
+        """
+        return self._set(numUserBlocks=value)
+
+    @since("1.4.0")
+    def setNumItemBlocks(self, value):
+        """
+        Sets the value of :py:attr:`numItemBlocks`.
+        """
+        return self._set(numItemBlocks=value)
+
+    @since("1.4.0")
+    def setNumBlocks(self, value):
+        """
+        Sets both :py:attr:`numUserBlocks` and :py:attr:`numItemBlocks` to the specific value.
+        """
+        self._set(numUserBlocks=value)
+        return self._set(numItemBlocks=value)
+
+    @since("1.4.0")
+    def setImplicitPrefs(self, value):
+        """
+        Sets the value of :py:attr:`implicitPrefs`.
+        """
+        return self._set(implicitPrefs=value)
+
+    @since("1.4.0")
+    def setAlpha(self, value):
+        """
+        Sets the value of :py:attr:`alpha`.
+        """
+        return self._set(alpha=value)
+
+    @since("1.4.0")
+    def setUserCol(self, value):
+        """
+        Sets the value of :py:attr:`userCol`.
+        """
+        return self._set(userCol=value)
+
+    @since("1.4.0")
+    def setItemCol(self, value):
+        """
+        Sets the value of :py:attr:`itemCol`.
+        """
+        return self._set(itemCol=value)
+
+    @since("1.4.0")
+    def setRatingCol(self, value):
+        """
+        Sets the value of :py:attr:`ratingCol`.
+        """
+        return self._set(ratingCol=value)
+
+    @since("1.4.0")
+    def setNonnegative(self, value):
+        """
+        Sets the value of :py:attr:`nonnegative`.
+        """
+        return self._set(nonnegative=value)
+
+    @since("2.0.0")
+    def setIntermediateStorageLevel(self, value):
+        """
+        Sets the value of :py:attr:`intermediateStorageLevel`.
+        """
+        return self._set(intermediateStorageLevel=value)
+
+    @since("2.0.0")
+    def setFinalStorageLevel(self, value):
+        """
+        Sets the value of :py:attr:`finalStorageLevel`.
+        """
+        return self._set(finalStorageLevel=value)
+
+    @since("2.2.0")
+    def setColdStartStrategy(self, value):
+        """
+        Sets the value of :py:attr:`coldStartStrategy`.
+        """
+        return self._set(coldStartStrategy=value)
+
+    def setMaxIter(self, value):
+        """
+        Sets the value of :py:attr:`maxIter`.
+        """
+        return self._set(maxIter=value)
+
+    def setRegParam(self, value):
+        """
+        Sets the value of :py:attr:`regParam`.
+        """
+        return self._set(regParam=value)
+
+    def setPredictionCol(self, value):
+        """
+        Sets the value of :py:attr:`predictionCol`.
+        """
+        return self._set(predictionCol=value)
+
+    def setCheckpointInterval(self, value):
+        """
+        Sets the value of :py:attr:`checkpointInterval`.
+        """
+        return self._set(checkpointInterval=value)
+
+    def setSeed(self, value):
+        """
+        Sets the value of :py:attr:`seed`.
+        """
+        return self._set(seed=value)
+
+    @since("3.0.0")
+    def setBlockSize(self, value):
+        """
+        Sets the value of :py:attr:`blockSize`.
+        """
+        return self._set(blockSize=value)
+
+
+class ALSModel(JavaModel, _ALSModelParams, JavaMLWritable, ALSModelJavaMLReadable):
+    """
+    Model fitted by ALS.
+
+    .. versionadded:: 1.4.0
+    """
+
+    @since("3.0.0")
+    def setUserCol(self, value):
+        """
+        Sets the value of :py:attr:`userCol`.
+        """
+        return self._set(userCol=value)
+
+    @since("3.0.0")
+    def setItemCol(self, value):
+        """
+        Sets the value of :py:attr:`itemCol`.
+        """
+        return self._set(itemCol=value)
+
+    @since("3.0.0")
+    def setColdStartStrategy(self, value):
+        """
+        Sets the value of :py:attr:`coldStartStrategy`.
+        """
+        return self._set(coldStartStrategy=value)
+
+    @since("3.0.0")
+    def setPredictionCol(self, value):
+        """
+        Sets the value of :py:attr:`predictionCol`.
+        """
+        return self._set(predictionCol=value)
+
+    @since("3.0.0")
+    def setBlockSize(self, value):
+        """
+        Sets the value of :py:attr:`blockSize`.
+        """
+        return self._set(blockSize=value)
+
+    @property
+    @since("1.4.0")
+    def rank(self):
+        """rank of the matrix factorization model"""
+        return self._call_java("rank")
+
+    @property
+    @since("1.4.0")
+    def userFactors(self):
+        """
+        a DataFrame that stores user factors in two columns: `id` and
+        `features`
+        """
+        return self._call_java("userFactors")
+
+    @property
+    @since("1.4.0")
+    def itemFactors(self):
+        """
+        a DataFrame that stores item factors in two columns: `id` and
+        `features`
+        """
+        return self._call_java("itemFactors")
+
+    def recommendForAllUsers(self, numItems):
+        """
+        Returns top `numItems` items recommended for each user, for all users.
+
+        .. versionadded:: 2.2.0
+
+        Parameters
+        ----------
+        numItems : int
+            max number of recommendations for each user
+
+        Returns
+        -------
+        :py:class:`pyspark.sql.DataFrame`
+            a DataFrame of (userCol, recommendations), where recommendations are
+            stored as an array of (itemCol, rating) Rows.
+        """
+        return self._call_java("recommendForAllUsers", numItems)
+
+    def recommendForAllItems(self, numUsers):
+        """
+        Returns top `numUsers` users recommended for each item, for all items.
+
+        .. versionadded:: 2.2.0
+
+        Parameters
+        ----------
+        numUsers : int
+            max number of recommendations for each item
+
+        Returns
+        -------
+        :py:class:`pyspark.sql.DataFrame`
+            a DataFrame of (itemCol, recommendations), where recommendations are
+            stored as an array of (userCol, rating) Rows.
+        """
+        return self._call_java("recommendForAllItems", numUsers)
+
+    def recommendForUserSubset(self, dataset, numItems):
+        """
+        Returns top `numItems` items recommended for each user id in the input data set. Note that
+        if there are duplicate ids in the input dataset, only one set of recommendations per unique
+        id will be returned.
+
+        .. versionadded:: 2.3.0
+
+        Parameters
+        ----------
+        dataset : :py:class:`pyspark.sql.DataFrame`
+            a DataFrame containing a column of user ids. The column name must match `userCol`.
+        numItems : int
+            max number of recommendations for each user
+
+        Returns
+        -------
+        :py:class:`pyspark.sql.DataFrame`
+            a DataFrame of (userCol, recommendations), where recommendations are
+            stored as an array of (itemCol, rating) Rows.
+        """
+        return self._call_java("recommendForUserSubset", dataset, numItems)
+
+    def recommendForItemSubset(self, dataset, numUsers):
+        """
+        Returns top `numUsers` users recommended for each item id in the input data set. Note that
+        if there are duplicate ids in the input dataset, only one set of recommendations per unique
+        id will be returned.
+
+        .. versionadded:: 2.3.0
+
+        Parameters
+        ----------
+        dataset : :py:class:`pyspark.sql.DataFrame`
+            a DataFrame containing a column of item ids. The column name must match `itemCol`.
+        numUsers : int
+            max number of recommendations for each item
+
+        Returns
+        -------
+        :py:class:`pyspark.sql.DataFrame`
+            a DataFrame of (itemCol, recommendations), where recommendations are
+            stored as an array of (userCol, rating) Rows.
+        """
+        return self._call_java("recommendForItemSubset", dataset, numUsers)
+
+    def recommendItemsForUserItemSubset(self, usersDataset, itemsDataset, numItems):
+        # Extension over the stock PySpark API: delegates to the custom ReplayALS
+        # Java backend, returning top `numItems` items for each user in
+        # `usersDataset`, presumably with candidates restricted to `itemsDataset`.
+        return self._call_java("recommendItemsForUserItemSubset", usersDataset, itemsDataset, numItems)
+
+    @staticmethod
+    def _from_java(java_stage):
+        """
+        Given a Java object, create and return a Python wrapper of it.
+        Used for ML persistence.
+
+        Meta-algorithms such as Pipeline should override this method as a classmethod.
+        """
+
+        def __get_class(clazz):
+            """
+            Loads Python class from its name.
+            """
+            parts = clazz.split(".")
+            module = ".".join(parts[:-1])
+            m = __import__(module)
+            for comp in parts[1:]:
+                m = getattr(m, comp)
+            return m
+
+        stage_name = "replay.experimental.models.extensions.spark_custom_models.als_extension.ALSModel"
+        # Generate a default new instance from the stage_name class.
+        py_type = __get_class(stage_name)
+        if issubclass(py_type, JavaParams):
+            # Load information from java_stage to the instance.
+            py_stage = py_type()
+            py_stage._java_obj = java_stage
+
+            # SPARK-10931: Temporary fix so that persisted models would own params from Estimator
+            if issubclass(py_type, JavaModel):
+                py_stage._create_params_from_java()
+
+            py_stage._resetUid(java_stage.uid())
+            py_stage._transfer_params_from_java()
+        elif hasattr(py_type, "_from_java"):
+            py_stage = py_type._from_java(java_stage)
+        else:
+            raise NotImplementedError("This Java stage cannot be loaded into Python currently: %r" % stage_name)
+        return py_stage
+
+if __name__ == "__main__":
+    import doctest
+
+    import pyspark.ml.recommendation
+    from pyspark.sql import SparkSession
+
+    globs = pyspark.ml.recommendation.__dict__.copy()
+    # The small batch size here ensures that we see multiple batches,
+    # even in these small test examples:
+    spark = SparkSession.builder.master("local[2]").appName("ml.recommendation tests").getOrCreate()
+    sc = spark.sparkContext
+    globs["sc"] = sc
+    globs["spark"] = spark
+    import tempfile
+
+    temp_path = tempfile.mkdtemp()
+    globs["temp_path"] = temp_path
+    try:
+        (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
+        spark.stop()
+    finally:
+        from shutil import rmtree
+
+        with contextlib.suppress(OSError):
+            rmtree(temp_path)
+    if failure_count:
+        sys.exit(-1)
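
For orientation, here is a minimal usage sketch of the module added above (an illustration, not part of the diff): it assumes a local SparkSession whose classpath provides the ReplayALS/ReplayALSModel Java classes that back this wrapper, and it reuses the toy data and default column names from the docstring examples.

    from pyspark.sql import SparkSession

    from replay.experimental.models.extensions.spark_custom_models.als_extension import ALS, ALSModel

    spark = SparkSession.builder.master("local[2]").appName("replay-als-sketch").getOrCreate()

    # Toy interactions in the default (user, item, rating) columns.
    df = spark.createDataFrame(
        [(0, 0, 4.0), (0, 1, 2.0), (1, 1, 3.0), (1, 2, 4.0), (2, 1, 1.0), (2, 2, 5.0)],
        ["user", "item", "rating"],
    )

    als = ALS(rank=10, maxIter=5, seed=0)  # fit() delegates to the ReplayALS Java estimator
    model = als.fit(df)

    model.recommendForAllUsers(2).show(truncate=False)  # top-2 items per user

    # Persistence round-trips through ALSModelJavaMLReader, which resolves
    # org.apache.spark.ml.recommendation.replay.ReplayALSModel explicitly.
    model.save("/tmp/replay_als_model")  # illustrative path; must not already exist
    restored = ALSModel.load("/tmp/replay_als_model")
    assert restored.rank == model.rank

    spark.stop()

The custom reader is what makes ALSModel.load work here: the stock JavaMLReader derives the Java class name from the Python class's module path, which would not resolve for this wrapper, so the overridden _load_java_obj pins the Replay Java class instead.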