spforge 0.8.8__py3-none-any.whl → 0.8.19__py3-none-any.whl

This diff shows the differences between publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their respective public registries.

Potentially problematic release.


This version of spforge might be problematic. Click here for more details.

@@ -3,6 +3,7 @@ from __future__ import annotations
3
3
 
4
4
  import copy
5
5
  import math
6
+ import logging
6
7
  from typing import Any, Literal
7
8
 
8
9
  import narwhals.stable.v2 as nw
@@ -15,6 +16,7 @@ from spforge.data_structures import (
15
16
  MatchPerformance,
16
17
  MatchPlayer,
17
18
  PlayerRating,
19
+ PlayerRatingChange,
18
20
  PlayerRatingsResult,
19
21
  PreMatchPlayerRating,
20
22
  PreMatchPlayersCollection,
@@ -33,6 +35,8 @@ from spforge.ratings.utils import (
33
35
  from spforge.feature_generator._utils import to_polars
34
36
 
35
37
  PLAYER_STATS = "__PLAYER_STATS"
38
+ _SCALED_PW = "__scaled_participation_weight__"
39
+ _SCALED_PPW = "__scaled_projected_participation_weight__"
36
40
 
37
41
 
38
42
  class PlayerRatingGenerator(RatingGenerator):
@@ -75,12 +79,13 @@ class PlayerRatingGenerator(RatingGenerator):
75
79
  start_min_count_for_percentiles: int = 50,
76
80
  start_team_rating_subtract: float = 80,
77
81
  start_team_weight: float = 0,
78
- start_max_days_ago_league_entities: int = 120,
82
+ start_max_days_ago_league_entities: int = 600,
79
83
  start_min_match_count_team_rating: int = 2,
80
84
  start_harcoded_start_rating: float | None = None,
81
85
  column_names: ColumnNames | None = None,
82
86
  output_suffix: str | None = None,
83
87
  scale_participation_weights: bool = False,
88
+ auto_scale_participation_weights: bool = True,
84
89
  **kwargs: Any,
85
90
  ):
86
91
  super().__init__(
@@ -164,6 +169,7 @@ class PlayerRatingGenerator(RatingGenerator):
164
169
 
165
170
  self.use_off_def_split = bool(use_off_def_split)
166
171
  self.scale_participation_weights = bool(scale_participation_weights)
172
+ self.auto_scale_participation_weights = bool(auto_scale_participation_weights)
167
173
  self._participation_weight_max: float | None = None
168
174
  self._projected_participation_weight_max: float | None = None
169
175
 
@@ -189,9 +195,39 @@ class PlayerRatingGenerator(RatingGenerator):
189
195
  column_names: ColumnNames | None = None,
190
196
  ) -> DataFrame | IntoFrameT:
191
197
  self.column_names = column_names if column_names else self.column_names
198
+ self._maybe_enable_participation_weight_scaling(df)
192
199
  self._set_participation_weight_max(df)
193
200
  return super().fit_transform(df, column_names)
194
201
 
202
+ def _maybe_enable_participation_weight_scaling(self, df: DataFrame) -> None:
203
+ if self.scale_participation_weights or not self.auto_scale_participation_weights:
204
+ return
205
+ cn = self.column_names
206
+ if not cn:
207
+ return
208
+
209
+ pl_df = df.to_native() if df.implementation.is_polars() else df.to_polars().to_native()
210
+
211
+ def _out_of_bounds(col_name: str | None) -> bool:
212
+ if not col_name or col_name not in df.columns:
213
+ return False
214
+ col = pl_df[col_name]
215
+ min_val = col.min()
216
+ max_val = col.max()
217
+ if min_val is None or max_val is None:
218
+ return False
219
+ eps = 1e-6
220
+ return min_val < -eps or max_val > (1.0 + eps)
221
+
222
+ if _out_of_bounds(cn.participation_weight) or _out_of_bounds(
223
+ cn.projected_participation_weight
224
+ ):
225
+ self.scale_participation_weights = True
226
+ logging.warning(
227
+ "Auto-scaling participation weights because values exceed [0, 1]. "
228
+ "Set scale_participation_weights=True explicitly to silence this warning."
229
+ )
230
+
195
231
  def _ensure_player_off(self, player_id: str) -> PlayerRating:
196
232
  if player_id not in self._player_off_ratings:
197
233
  # create with start generator later; initialize to 0 now; overwritten when needed
@@ -240,6 +276,7 @@ class PlayerRatingGenerator(RatingGenerator):
240
276
  self._projected_participation_weight_max = self._participation_weight_max
241
277
 
242
278
  def _scale_participation_weight_columns(self, df: pl.DataFrame) -> pl.DataFrame:
279
+ """Create internal scaled participation weight columns without mutating originals."""
243
280
  if not self.scale_participation_weights:
244
281
  return df
245
282
  if self._participation_weight_max is None or self._participation_weight_max <= 0:
@@ -254,7 +291,7 @@ class PlayerRatingGenerator(RatingGenerator):
254
291
  df = df.with_columns(
255
292
  (pl.col(cn.participation_weight) / denom)
256
293
  .clip(0.0, 1.0)
257
- .alias(cn.participation_weight)
294
+ .alias(_SCALED_PW)
258
295
  )
259
296
 
260
297
  if (
@@ -267,16 +304,38 @@ class PlayerRatingGenerator(RatingGenerator):
267
304
  df = df.with_columns(
268
305
  (pl.col(cn.projected_participation_weight) / denom)
269
306
  .clip(0.0, 1.0)
270
- .alias(cn.projected_participation_weight)
307
+ .alias(_SCALED_PPW)
271
308
  )
272
309
 
273
310
  return df
274
311
 
312
+ def _get_participation_weight_col(self) -> str:
313
+ """Get the column name to use for participation weight (scaled if available)."""
314
+ cn = self.column_names
315
+ if self.scale_participation_weights and cn and cn.participation_weight:
316
+ return _SCALED_PW
317
+ return cn.participation_weight if cn else ""
318
+
319
+ def _get_projected_participation_weight_col(self) -> str:
320
+ """Get the column name to use for projected participation weight (scaled if available)."""
321
+ cn = self.column_names
322
+ if self.scale_participation_weights and cn and cn.projected_participation_weight:
323
+ return _SCALED_PPW
324
+ return cn.projected_participation_weight if cn else ""
325
+
326
+ def _remove_internal_scaled_columns(self, df: pl.DataFrame) -> pl.DataFrame:
327
+ """Remove internal scaled columns before returning."""
328
+ cols_to_drop = [c for c in [_SCALED_PW, _SCALED_PPW] if c in df.columns]
329
+ if cols_to_drop:
330
+ df = df.drop(cols_to_drop)
331
+ return df
332
+
275
333
  def _historical_transform(self, df: pl.DataFrame) -> pl.DataFrame:
276
334
  df = self._scale_participation_weight_columns(df)
277
335
  match_df = self._create_match_df(df)
278
336
  ratings = self._calculate_ratings(match_df)
279
337
 
338
+ # Keep scaled columns for now - they're needed by _add_rating_features
280
339
  cols = [
281
340
  c
282
341
  for c in df.columns
@@ -296,13 +355,15 @@ class PlayerRatingGenerator(RatingGenerator):
296
355
  on=[self.column_names.player_id, self.column_names.match_id, self.column_names.team_id],
297
356
  )
298
357
 
299
- return self._add_rating_features(df)
358
+ result = self._add_rating_features(df)
359
+ return self._remove_internal_scaled_columns(result)
300
360
 
301
361
  def _future_transform(self, df: pl.DataFrame) -> pl.DataFrame:
302
362
  df = self._scale_participation_weight_columns(df)
303
363
  match_df = self._create_match_df(df)
304
364
  ratings = self._calculate_future_ratings(match_df)
305
365
 
366
+ # Keep scaled columns for now - they're needed by _add_rating_features
306
367
  cols = [
307
368
  c
308
369
  for c in df.columns
@@ -327,7 +388,8 @@ class PlayerRatingGenerator(RatingGenerator):
327
388
  how="left",
328
389
  )
329
390
 
330
- return self._add_rating_features(df_with_ratings)
391
+ result = self._add_rating_features(df_with_ratings)
392
+ return self._remove_internal_scaled_columns(result)
331
393
 
332
394
  def _calculate_ratings(self, match_df: pl.DataFrame) -> pl.DataFrame:
333
395
  cn = self.column_names
@@ -381,9 +443,9 @@ class PlayerRatingGenerator(RatingGenerator):
381
443
  team1_off_rating, team1_def_rating = self._team_off_def_rating_from_collection(c1)
382
444
  team2_off_rating, team2_def_rating = self._team_off_def_rating_from_collection(c2)
383
445
 
384
- player_updates: list[tuple[str, str, float, float, float, float, float, float, int]] = (
385
- []
386
- )
446
+ player_updates: list[
447
+ tuple[str, str, float, float, float, float, float, float, int, str | None]
448
+ ] = []
387
449
 
388
450
  for pre_player in c1.pre_match_player_ratings:
389
451
  pid = pre_player.id
@@ -459,6 +521,7 @@ class PlayerRatingGenerator(RatingGenerator):
459
521
  float(off_change),
460
522
  float(def_change),
461
523
  day_number,
524
+ pre_player.league,
462
525
  )
463
526
  )
464
527
 
@@ -536,6 +599,7 @@ class PlayerRatingGenerator(RatingGenerator):
536
599
  float(off_change),
537
600
  float(def_change),
538
601
  day_number,
602
+ pre_player.league,
539
603
  )
540
604
  )
541
605
 
@@ -550,6 +614,7 @@ class PlayerRatingGenerator(RatingGenerator):
550
614
  _off_change,
551
615
  _def_change,
552
616
  _dn,
617
+ _league,
553
618
  ) in player_updates:
554
619
  out[cn.player_id].append(pid)
555
620
  out[cn.match_id].append(match_id)
@@ -566,15 +631,18 @@ class PlayerRatingGenerator(RatingGenerator):
566
631
  for (
567
632
  pid,
568
633
  team_id,
569
- _off_pre,
634
+ off_pre,
570
635
  _def_pre,
571
636
  _pred_off,
572
637
  _pred_def,
573
638
  off_change,
574
639
  def_change,
575
640
  dn,
641
+ league,
576
642
  ) in player_updates:
577
- pending_team_updates.append((pid, team_id, off_change, def_change, dn))
643
+ pending_team_updates.append(
644
+ (pid, team_id, off_pre, off_change, def_change, dn, league)
645
+ )
578
646
 
579
647
  if last_update_id is None:
580
648
  last_update_id = update_id
@@ -584,9 +652,11 @@ class PlayerRatingGenerator(RatingGenerator):
584
652
 
585
653
  return pl.DataFrame(out, strict=False)
586
654
 
587
- def _apply_player_updates(self, updates: list[tuple[str, str, float, float, int]]) -> None:
655
+ def _apply_player_updates(
656
+ self, updates: list[tuple[str, str, float, float, float, int, str | None]]
657
+ ) -> None:
588
658
 
589
- for player_id, team_id, off_change, def_change, day_number in updates:
659
+ for player_id, team_id, pre_rating, off_change, def_change, day_number, league in updates:
590
660
  off_state = self._player_off_ratings[player_id]
591
661
  off_state.confidence_sum = self._calculate_post_match_confidence_sum(
592
662
  entity_rating=off_state,
@@ -609,6 +679,19 @@ class PlayerRatingGenerator(RatingGenerator):
609
679
  def_state.last_match_day_number = int(day_number)
610
680
  def_state.most_recent_team_id = team_id
611
681
 
682
+ self.start_rating_generator.update_players_to_leagues(
683
+ PlayerRatingChange(
684
+ id=player_id,
685
+ day_number=day_number,
686
+ league=league,
687
+ participation_weight=1.0,
688
+ predicted_performance=0.0,
689
+ performance=0.0,
690
+ pre_match_rating_value=pre_rating,
691
+ rating_change_value=off_change,
692
+ )
693
+ )
694
+
612
695
  def _add_rating_features(self, df: pl.DataFrame) -> pl.DataFrame:
613
696
  cols_to_add = set((self._features_out or []) + (self.non_predictor_features_out or []))
614
697
 
@@ -763,9 +846,13 @@ class PlayerRatingGenerator(RatingGenerator):
763
846
 
764
847
  if cn.participation_weight and cn.participation_weight in df.columns:
765
848
  player_stat_cols.append(cn.participation_weight)
849
+ if _SCALED_PW in df.columns:
850
+ player_stat_cols.append(_SCALED_PW)
766
851
 
767
852
  if cn.projected_participation_weight and cn.projected_participation_weight in df.columns:
768
853
  player_stat_cols.append(cn.projected_participation_weight)
854
+ if _SCALED_PPW in df.columns:
855
+ player_stat_cols.append(_SCALED_PPW)
769
856
 
770
857
  if cn.position and cn.position in df.columns:
771
858
  player_stat_cols.append(cn.position)
@@ -821,14 +908,23 @@ class PlayerRatingGenerator(RatingGenerator):
821
908
  position = team_player.get(cn.position)
822
909
  player_league = team_player.get(cn.league, None)
823
910
 
824
- participation_weight = (
825
- team_player.get(cn.participation_weight, 1.0) if cn.participation_weight else 1.0
826
- )
827
- projected_participation_weight = (
828
- team_player.get(cn.projected_participation_weight, participation_weight)
829
- if cn.projected_participation_weight
830
- else participation_weight
831
- )
911
+ # Use scaled participation weight if available, otherwise use original
912
+ if _SCALED_PW in team_player:
913
+ participation_weight = team_player.get(_SCALED_PW, 1.0)
914
+ elif cn.participation_weight:
915
+ participation_weight = team_player.get(cn.participation_weight, 1.0)
916
+ else:
917
+ participation_weight = 1.0
918
+
919
+ # Use scaled projected participation weight if available, otherwise use original
920
+ if _SCALED_PPW in team_player:
921
+ projected_participation_weight = team_player.get(_SCALED_PPW, participation_weight)
922
+ elif cn.projected_participation_weight:
923
+ projected_participation_weight = team_player.get(
924
+ cn.projected_participation_weight, participation_weight
925
+ )
926
+ else:
927
+ projected_participation_weight = participation_weight
832
928
  projected_participation_weights.append(projected_participation_weight)
833
929
 
834
930
  perf_val = (
@@ -1054,14 +1150,21 @@ class PlayerRatingGenerator(RatingGenerator):
1054
1150
  position = tp.get(cn.position)
1055
1151
  league = tp.get(cn.league, None)
1056
1152
 
1057
- pw = (
1058
- tp.get(cn.participation_weight, 1.0) if cn.participation_weight else 1.0
1059
- )
1060
- ppw = (
1061
- tp.get(cn.projected_participation_weight, pw)
1062
- if cn.projected_participation_weight
1063
- else pw
1064
- )
1153
+ # Use scaled participation weight if available, otherwise use original
1154
+ if _SCALED_PW in tp:
1155
+ pw = tp.get(_SCALED_PW, 1.0)
1156
+ elif cn.participation_weight:
1157
+ pw = tp.get(cn.participation_weight, 1.0)
1158
+ else:
1159
+ pw = 1.0
1160
+
1161
+ # Use scaled projected participation weight if available, otherwise use original
1162
+ if _SCALED_PPW in tp:
1163
+ ppw = tp.get(_SCALED_PPW, pw)
1164
+ elif cn.projected_participation_weight:
1165
+ ppw = tp.get(cn.projected_participation_weight, pw)
1166
+ else:
1167
+ ppw = pw
1065
1168
  proj_w.append(float(ppw))
1066
1169
 
1067
1170
  mp = MatchPerformance(
@@ -28,7 +28,7 @@ class StartRatingGenerator:
28
28
  min_count_for_percentiles: int = 50,
29
29
  team_rating_subtract: float = 80,
30
30
  team_weight: float = 0,
31
- max_days_ago_league_entities: int = 120,
31
+ max_days_ago_league_entities: int = 600,
32
32
  min_match_count_team_rating: int = 2,
33
33
  harcoded_start_rating: float | None = None,
34
34
  ):
@@ -24,7 +24,7 @@ class TeamStartRatingGenerator:
24
24
  league_ratings: dict[str, float] | None = None,
25
25
  league_quantile: float = 0.2,
26
26
  min_count_for_percentiles: int = 50,
27
- max_days_ago_league_entities: int = 120,
27
+ max_days_ago_league_entities: int = 600,
28
28
  min_match_count_team_rating: int = 2,
29
29
  harcoded_start_rating: float | None = None,
30
30
  ):
spforge/ratings/utils.py CHANGED
@@ -2,6 +2,10 @@ import polars as pl
2
2
 
3
3
  from spforge.data_structures import ColumnNames
4
4
 
5
+ # Internal column names for scaled participation weights
6
+ _SCALED_PW = "__scaled_participation_weight__"
7
+ _SCALED_PPW = "__scaled_projected_participation_weight__"
8
+
5
9
 
6
10
  def add_team_rating(
7
11
  df: pl.DataFrame,
@@ -46,11 +50,14 @@ def add_team_rating_projected(
46
50
  tid = column_names.team_id
47
51
  ppw = column_names.projected_participation_weight
48
52
 
49
- if ppw:
53
+ # Use scaled column if available (clipped to [0, 1]), otherwise raw column
54
+ weight_col = _SCALED_PPW if _SCALED_PPW in df.columns else ppw
55
+
56
+ if weight_col and weight_col in df.columns:
50
57
  return df.with_columns(
51
58
  (
52
- (pl.col(ppw) * pl.col(player_rating_col)).sum().over([mid, tid])
53
- / pl.col(ppw).sum().over([mid, tid])
59
+ (pl.col(weight_col) * pl.col(player_rating_col)).sum().over([mid, tid])
60
+ / pl.col(weight_col).sum().over([mid, tid])
54
61
  ).alias(team_rating_out)
55
62
  )
56
63
 
@@ -118,11 +125,14 @@ def add_rating_mean_projected(
118
125
  mid = column_names.match_id
119
126
  ppw = column_names.projected_participation_weight
120
127
 
121
- if ppw:
128
+ # Use scaled column if available (clipped to [0, 1]), otherwise raw column
129
+ weight_col = _SCALED_PPW if _SCALED_PPW in df.columns else ppw
130
+
131
+ if weight_col and weight_col in df.columns:
122
132
  return df.with_columns(
123
133
  (
124
- (pl.col(ppw) * pl.col(player_rating_col)).sum().over(mid)
125
- / pl.col(ppw).sum().over(mid)
134
+ (pl.col(weight_col) * pl.col(player_rating_col)).sum().over(mid)
135
+ / pl.col(weight_col).sum().over(mid)
126
136
  ).alias(rating_mean_out)
127
137
  )
128
138
 
spforge/scorer/_score.py CHANGED
@@ -366,18 +366,49 @@ class PWMSE(BaseScorer):
366
366
  self.labels = labels
367
367
  self.evaluation_labels = evaluation_labels
368
368
 
369
+ self._needs_extension = False
370
+ self._needs_slicing = False
369
371
  self._eval_indices: list[int] | None = None
372
+ self._extension_mapping: dict[int, int] | None = None
373
+
370
374
  if self.evaluation_labels is not None and self.labels is not None:
371
- label_to_idx = {lbl: i for i, lbl in enumerate(self.labels)}
372
- self._eval_indices = [label_to_idx[lbl] for lbl in self.evaluation_labels]
375
+ training_set = set(self.labels)
376
+ eval_set = set(self.evaluation_labels)
377
+
378
+ if eval_set <= training_set:
379
+ self._needs_slicing = True
380
+ label_to_idx = {lbl: i for i, lbl in enumerate(self.labels)}
381
+ self._eval_indices = [label_to_idx[lbl] for lbl in self.evaluation_labels]
382
+ elif training_set <= eval_set:
383
+ self._needs_extension = True
384
+ eval_label_to_idx = {lbl: i for i, lbl in enumerate(self.evaluation_labels)}
385
+ self._extension_mapping = {
386
+ train_idx: eval_label_to_idx[lbl]
387
+ for train_idx, lbl in enumerate(self.labels)
388
+ }
389
+ else:
390
+ raise ValueError(
391
+ f"evaluation_labels must be a subset or superset of labels. "
392
+ f"labels={self.labels}, evaluation_labels={self.evaluation_labels}"
393
+ )
394
+
395
+ def _align_predictions(self, preds: np.ndarray) -> np.ndarray:
396
+ if self._needs_slicing and self._eval_indices is not None:
397
+ sliced = preds[:, self._eval_indices]
398
+ row_sums = sliced.sum(axis=1, keepdims=True)
399
+ row_sums = np.where(row_sums == 0, 1.0, row_sums)
400
+ return sliced / row_sums
401
+
402
+ if self._needs_extension and self._extension_mapping is not None:
403
+ n_samples = preds.shape[0]
404
+ n_eval_labels = len(self.evaluation_labels)
405
+ extended = np.full((n_samples, n_eval_labels), 1e-5, dtype=np.float64)
406
+ for train_idx, eval_idx in self._extension_mapping.items():
407
+ extended[:, eval_idx] = preds[:, train_idx]
408
+ row_sums = extended.sum(axis=1, keepdims=True)
409
+ return extended / row_sums
373
410
 
374
- def _slice_and_renormalize(self, preds: np.ndarray) -> np.ndarray:
375
- if self._eval_indices is None:
376
- return preds
377
- sliced = preds[:, self._eval_indices]
378
- row_sums = sliced.sum(axis=1, keepdims=True)
379
- row_sums = np.where(row_sums == 0, 1.0, row_sums)
380
- return sliced / row_sums
411
+ return preds
381
412
 
382
413
  def _get_scoring_labels(self) -> list[int]:
383
414
  if self.evaluation_labels is not None:
@@ -446,7 +477,7 @@ class PWMSE(BaseScorer):
446
477
 
447
478
  targets = gran_df[self.target].to_numpy().astype(np.float64)
448
479
  preds = np.asarray(gran_df[self.pred_column].to_list(), dtype=np.float64)
449
- preds = self._slice_and_renormalize(preds)
480
+ preds = self._align_predictions(preds)
450
481
  score = self._pwmse_score(targets, preds)
451
482
  if self.compare_to_naive:
452
483
  naive_probs_list = _naive_probability_predictions_for_df(
@@ -464,7 +495,7 @@ class PWMSE(BaseScorer):
464
495
 
465
496
  targets = df[self.target].to_numpy().astype(np.float64)
466
497
  preds = np.asarray(df[self.pred_column].to_list(), dtype=np.float64)
467
- preds = self._slice_and_renormalize(preds)
498
+ preds = self._align_predictions(preds)
468
499
  score = self._pwmse_score(targets, preds)
469
500
  if self.compare_to_naive:
470
501
  naive_probs_list = _naive_probability_predictions_for_df(
@@ -8,8 +8,9 @@ from sklearn.base import BaseEstimator, TransformerMixin
8
8
 
9
9
 
10
10
  class GroupByReducer(BaseEstimator, TransformerMixin):
11
- def __init__(self, granularity: list[str]):
11
+ def __init__(self, granularity: list[str], aggregation_weight: str | None = None):
12
12
  self.granularity = granularity
13
+ self.aggregation_weight = aggregation_weight
13
14
 
14
15
  @nw.narwhalify
15
16
  def fit(self, X: IntoFrameT, y: Any = None):
@@ -26,18 +27,47 @@ class GroupByReducer(BaseEstimator, TransformerMixin):
26
27
  raise ValueError("Could not find granularity columns in dataframe %s", self.granularity)
27
28
 
28
29
  non_keys = [c for c in df.columns if c not in keys]
29
- num_cols = [c for c in non_keys if pd.api.types.is_numeric_dtype(df[c])]
30
+ schema = df.schema
31
+ num_cols = [c for c in non_keys if schema[c].is_numeric()]
30
32
  other_cols = [c for c in non_keys if c not in num_cols]
31
33
 
32
34
  aggs: list[nw.Expr] = []
33
35
 
36
+ # Backwards compatibility: old pickled objects may not have aggregation_weight
37
+ weight_col = getattr(self, "aggregation_weight", None)
38
+ has_weight = weight_col and weight_col in df.columns
39
+
34
40
  for c in num_cols:
35
- aggs.append(nw.col(c).mean().alias(c))
41
+ if c == weight_col:
42
+ aggs.append(nw.col(c).sum().alias(c))
43
+ elif has_weight:
44
+ aggs.append((nw.col(c) * nw.col(weight_col)).sum().alias(f"__{c}_weighted_sum"))
45
+ aggs.append(nw.col(c).mean().alias(f"__{c}_fallback"))
46
+ else:
47
+ aggs.append(nw.col(c).mean().alias(c))
36
48
 
37
49
  for c in other_cols:
38
50
  aggs.append(nw.col(c).first().alias(c))
39
51
 
52
+ if has_weight:
53
+ aggs.append(nw.col(weight_col).sum().alias("__weight_sum"))
54
+
40
55
  out = df.group_by(keys).agg(aggs)
56
+
57
+ if has_weight:
58
+ weighted_cols = [c for c in num_cols if c != weight_col]
59
+ for c in weighted_cols:
60
+ out = out.with_columns(
61
+ nw.when((~nw.col("__weight_sum").is_null()) & (nw.col("__weight_sum") != 0))
62
+ .then(nw.col(f"__{c}_weighted_sum") / nw.col("__weight_sum"))
63
+ .otherwise(nw.col(f"__{c}_fallback"))
64
+ .alias(c)
65
+ )
66
+ drop_cols = [f"__{c}_weighted_sum" for c in weighted_cols]
67
+ drop_cols += [f"__{c}_fallback" for c in weighted_cols]
68
+ drop_cols.append("__weight_sum")
69
+ out = out.drop(drop_cols)
70
+
41
71
  return out
42
72
 
43
73
  @nw.narwhalify
@@ -59,12 +89,12 @@ class GroupByReducer(BaseEstimator, TransformerMixin):
59
89
  if sample_weight is not None:
60
90
  df = df.with_columns(nw.lit(sample_weight).alias("__sw"))
61
91
 
62
- y_is_numeric = df.select(nw.col("__y")).schema["__y"].is_numeric()
92
+ y_uniques = df.group_by(keys).agg(nw.col("__y").n_unique().alias("__y_nunique"))
93
+ non_uniform = y_uniques.filter(nw.col("__y_nunique") > 1)
94
+ if len(non_uniform) > 0:
95
+ raise ValueError("Target (y) must be uniform within each granularity group")
63
96
 
64
- if y_is_numeric:
65
- agg_exprs = [nw.col("__y").mean().alias("__y")]
66
- else:
67
- agg_exprs = [nw.col("__y").first().alias("__y")]
97
+ agg_exprs = [nw.col("__y").first().alias("__y")]
68
98
 
69
99
  if sample_weight is not None:
70
100
  agg_exprs.append(nw.col("__sw").sum().alias("__sw"))
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: spforge
3
- Version: 0.8.8
3
+ Version: 0.8.19
4
4
  Summary: A flexible framework for generating features, ratings, and building machine learning or other models for training and inference on sports data.
5
5
  Author-email: Mathias Holmstrøm <mathiasholmstom@gmail.com>
6
6
  License: See LICENSE file