spforge 0.8.27__tar.gz → 0.8.30__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of spforge might be problematic. Click here for more details.
- {spforge-0.8.27/spforge.egg-info → spforge-0.8.30}/PKG-INFO +1 -1
- {spforge-0.8.27 → spforge-0.8.30}/pyproject.toml +1 -1
- {spforge-0.8.27 → spforge-0.8.30}/spforge/data_structures.py +4 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/ratings/_player_rating.py +128 -5
- {spforge-0.8.27 → spforge-0.8.30}/spforge/ratings/player_performance_predictor.py +11 -13
- {spforge-0.8.27 → spforge-0.8.30}/spforge/scorer/_score.py +121 -0
- {spforge-0.8.27 → spforge-0.8.30/spforge.egg-info}/PKG-INFO +1 -1
- {spforge-0.8.27 → spforge-0.8.30}/spforge.egg-info/SOURCES.txt +1 -0
- {spforge-0.8.27 → spforge-0.8.30}/tests/ratings/test_player_rating_generator.py +470 -1
- spforge-0.8.30/tests/scorer/test_scorer_name.py +292 -0
- {spforge-0.8.27 → spforge-0.8.30}/LICENSE +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/MANIFEST.in +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/README.md +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/examples/__init__.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/examples/game_level_example.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/examples/lol/__init__.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/examples/lol/data/__init__.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/examples/lol/data/subsample_lol_data.parquet +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/examples/lol/data/utils.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/examples/lol/pipeline_transformer_example.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/examples/nba/__init__.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/examples/nba/cross_validation_example.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/examples/nba/data/__init__.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/examples/nba/data/game_player_subsample.parquet +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/examples/nba/data/utils.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/examples/nba/feature_engineering_example.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/examples/nba/game_winner_example.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/examples/nba/predictor_transformers_example.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/setup.cfg +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/__init__.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/autopipeline.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/base_feature_generator.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/cross_validator/__init__.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/cross_validator/_base.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/cross_validator/cross_validator.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/distributions/__init__.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/distributions/_negative_binomial_estimator.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/distributions/_normal_distribution_predictor.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/distributions/_student_t_distribution_estimator.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/estimator/__init__.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/estimator/_conditional_estimator.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/estimator/_frequency_bucketing_classifier.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/estimator/_granularity_estimator.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/estimator/_group_by_estimator.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/estimator/_ordinal_classifier.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/estimator/_sklearn_enhancer_estimator.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/feature_generator/__init__.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/feature_generator/_base.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/feature_generator/_lag.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/feature_generator/_net_over_predicted.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/feature_generator/_regressor_feature_generator.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/feature_generator/_rolling_against_opponent.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/feature_generator/_rolling_mean_binary.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/feature_generator/_rolling_mean_days.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/feature_generator/_rolling_window.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/feature_generator/_utils.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/features_generator_pipeline.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/hyperparameter_tuning/__init__.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/hyperparameter_tuning/_default_search_spaces.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/hyperparameter_tuning/_tuner.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/performance_transformers/__init__.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/performance_transformers/_performance_manager.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/performance_transformers/_performances_transformers.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/ratings/__init__.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/ratings/_base.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/ratings/_team_rating.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/ratings/enums.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/ratings/league_identifier.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/ratings/league_start_rating_optimizer.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/ratings/start_rating_generator.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/ratings/team_performance_predictor.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/ratings/team_start_rating_generator.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/ratings/utils.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/scorer/__init__.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/transformers/__init__.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/transformers/_base.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/transformers/_net_over_predicted.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/transformers/_operator.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/transformers/_other_transformer.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/transformers/_predictor.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/transformers/_simple_transformer.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/transformers/_team_ratio_predictor.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge/utils.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge.egg-info/dependency_links.txt +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge.egg-info/requires.txt +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/spforge.egg-info/top_level.txt +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/tests/cross_validator/test_cross_validator.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/tests/distributions/test_distribution.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/tests/end_to_end/test_estimator_hyperparameter_tuning.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/tests/end_to_end/test_league_start_rating_optimizer.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/tests/end_to_end/test_lol_player_kills.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/tests/end_to_end/test_nba_player_points.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/tests/end_to_end/test_nba_player_ratings_hyperparameter_tuning.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/tests/end_to_end/test_nba_prediction_consistency.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/tests/estimator/test_sklearn_estimator.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/tests/feature_generator/test_lag.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/tests/feature_generator/test_regressor_feature_generator.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/tests/feature_generator/test_rolling_against_opponent.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/tests/feature_generator/test_rolling_mean_binary.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/tests/feature_generator/test_rolling_mean_days.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/tests/feature_generator/test_rolling_window.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/tests/hyperparameter_tuning/test_estimator_tuner.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/tests/hyperparameter_tuning/test_rating_tuner.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/tests/performance_transformers/test_performance_manager.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/tests/performance_transformers/test_performances_transformers.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/tests/ratings/test_player_rating_no_mutation.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/tests/ratings/test_ratings_property.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/tests/ratings/test_team_rating_generator.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/tests/ratings/test_utils_scaled_weights.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/tests/scorer/test_score.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/tests/scorer/test_score_aggregation_granularity.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/tests/test_autopipeline.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/tests/test_autopipeline_context.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/tests/test_feature_generator_pipeline.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/tests/transformers/test_estimator_transformer_context.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/tests/transformers/test_net_over_predicted.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/tests/transformers/test_other_transformer.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/tests/transformers/test_predictor_transformer.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/tests/transformers/test_simple_transformer.py +0 -0
- {spforge-0.8.27 → spforge-0.8.30}/tests/transformers/test_team_ratio_predictor.py +0 -0
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: spforge
|
|
3
|
-
Version: 0.8.27
|
|
3
|
+
Version: 0.8.30
|
|
4
4
|
Summary: A flexible framework for generating features, ratings, and building machine learning or other models for training and inference on sports data.
|
|
5
5
|
Author-email: Mathias Holmstrøm <mathiasholmstom@gmail.com>
|
|
6
6
|
License: See LICENSE file
|
|
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
|
|
|
4
4
|
|
|
5
5
|
[project]
|
|
6
6
|
name = "spforge"
|
|
7
|
-
version = "0.8.27"
|
|
7
|
+
version = "0.8.30"
|
|
8
8
|
description = "A flexible framework for generating features, ratings, and building machine learning or other models for training and inference on sports data."
|
|
9
9
|
readme = "README.md"
|
|
10
10
|
requires-python = ">=3.11"
|
|
@@ -12,6 +12,8 @@ class ColumnNames:
|
|
|
12
12
|
position: str | None = None
|
|
13
13
|
participation_weight: str | None = None
|
|
14
14
|
projected_participation_weight: str | None = None
|
|
15
|
+
defense_participation_weight: str | None = None
|
|
16
|
+
projected_defense_participation_weight: str | None = None
|
|
15
17
|
update_match_id: str | None = None
|
|
16
18
|
parent_team_id: str | None = None
|
|
17
19
|
team_players_playing_time: str | None = None
|
|
@@ -81,6 +83,8 @@ class MatchPerformance:
|
|
|
81
83
|
performance_value: float | None
|
|
82
84
|
participation_weight: float | None
|
|
83
85
|
projected_participation_weight: float
|
|
86
|
+
defense_participation_weight: float | None = None
|
|
87
|
+
projected_defense_participation_weight: float | None = None
|
|
84
88
|
team_players_playing_time: dict[str, float] | None = None
|
|
85
89
|
opponent_players_playing_time: dict[str, float] | None = None
|
|
86
90
|
|
|
@@ -39,6 +39,8 @@ from spforge.feature_generator._utils import to_polars
|
|
|
39
39
|
PLAYER_STATS = "__PLAYER_STATS"
|
|
40
40
|
_SCALED_PW = "__scaled_participation_weight__"
|
|
41
41
|
_SCALED_PPW = "__scaled_projected_participation_weight__"
|
|
42
|
+
_SCALED_DPW = "__scaled_defense_participation_weight__"
|
|
43
|
+
_SCALED_PDPW = "__scaled_projected_defense_participation_weight__"
|
|
42
44
|
|
|
43
45
|
|
|
44
46
|
class PlayerRatingGenerator(RatingGenerator):
|
|
@@ -166,6 +168,18 @@ class PlayerRatingGenerator(RatingGenerator):
|
|
|
166
168
|
self.start_min_match_count_team_rating = start_min_match_count_team_rating
|
|
167
169
|
self.start_hardcoded_start_rating = start_harcoded_start_rating
|
|
168
170
|
|
|
171
|
+
if hasattr(self._performance_predictor, '_reference_rating'):
|
|
172
|
+
effective_start = self.start_hardcoded_start_rating
|
|
173
|
+
|
|
174
|
+
if effective_start is None and self.start_league_ratings:
|
|
175
|
+
league_ratings = list(self.start_league_ratings.values())
|
|
176
|
+
effective_start = sum(league_ratings) / len(league_ratings)
|
|
177
|
+
|
|
178
|
+
if effective_start is None:
|
|
179
|
+
effective_start = 1000
|
|
180
|
+
|
|
181
|
+
self._performance_predictor._reference_rating = effective_start
|
|
182
|
+
|
|
169
183
|
self.team_id_change_confidence_sum_decrease = team_id_change_confidence_sum_decrease
|
|
170
184
|
self.column_names = column_names
|
|
171
185
|
|
|
@@ -174,6 +188,8 @@ class PlayerRatingGenerator(RatingGenerator):
|
|
|
174
188
|
self.auto_scale_participation_weights = bool(auto_scale_participation_weights)
|
|
175
189
|
self._participation_weight_max: float | None = None
|
|
176
190
|
self._projected_participation_weight_max: float | None = None
|
|
191
|
+
self._defense_participation_weight_max: float | None = None
|
|
192
|
+
self._projected_defense_participation_weight_max: float | None = None
|
|
177
193
|
|
|
178
194
|
self._player_off_ratings: dict[str, PlayerRating] = {}
|
|
179
195
|
self._player_def_ratings: dict[str, PlayerRating] = {}
|
|
@@ -221,8 +237,11 @@ class PlayerRatingGenerator(RatingGenerator):
|
|
|
221
237
|
eps = 1e-6
|
|
222
238
|
return min_val < -eps or max_val > (1.0 + eps)
|
|
223
239
|
|
|
224
|
-
if
|
|
225
|
-
cn.
|
|
240
|
+
if (
|
|
241
|
+
_out_of_bounds(cn.participation_weight)
|
|
242
|
+
or _out_of_bounds(cn.projected_participation_weight)
|
|
243
|
+
or _out_of_bounds(cn.defense_participation_weight)
|
|
244
|
+
or _out_of_bounds(cn.projected_defense_participation_weight)
|
|
226
245
|
):
|
|
227
246
|
self.scale_participation_weights = True
|
|
228
247
|
logging.warning(
|
|
@@ -277,6 +296,25 @@ class PlayerRatingGenerator(RatingGenerator):
|
|
|
277
296
|
elif self._participation_weight_max is not None:
|
|
278
297
|
self._projected_participation_weight_max = self._participation_weight_max
|
|
279
298
|
|
|
299
|
+
if cn.defense_participation_weight and cn.defense_participation_weight in df.columns:
|
|
300
|
+
q_val = pl_df[cn.defense_participation_weight].quantile(0.99, "linear")
|
|
301
|
+
if q_val is not None:
|
|
302
|
+
self._defense_participation_weight_max = float(q_val)
|
|
303
|
+
elif self._participation_weight_max is not None:
|
|
304
|
+
self._defense_participation_weight_max = self._participation_weight_max
|
|
305
|
+
|
|
306
|
+
if (
|
|
307
|
+
cn.projected_defense_participation_weight
|
|
308
|
+
and cn.projected_defense_participation_weight in df.columns
|
|
309
|
+
):
|
|
310
|
+
q_val = pl_df[cn.projected_defense_participation_weight].quantile(0.99, "linear")
|
|
311
|
+
if q_val is not None:
|
|
312
|
+
self._projected_defense_participation_weight_max = float(q_val)
|
|
313
|
+
elif self._defense_participation_weight_max is not None:
|
|
314
|
+
self._projected_defense_participation_weight_max = self._defense_participation_weight_max
|
|
315
|
+
elif self._projected_participation_weight_max is not None:
|
|
316
|
+
self._projected_defense_participation_weight_max = self._projected_participation_weight_max
|
|
317
|
+
|
|
280
318
|
def _scale_participation_weight_columns(self, df: pl.DataFrame) -> pl.DataFrame:
|
|
281
319
|
"""Create internal scaled participation weight columns without mutating originals."""
|
|
282
320
|
if not self.scale_participation_weights:
|
|
@@ -309,6 +347,32 @@ class PlayerRatingGenerator(RatingGenerator):
|
|
|
309
347
|
.alias(_SCALED_PPW)
|
|
310
348
|
)
|
|
311
349
|
|
|
350
|
+
if (
|
|
351
|
+
cn.defense_participation_weight
|
|
352
|
+
and cn.defense_participation_weight in df.columns
|
|
353
|
+
and self._defense_participation_weight_max is not None
|
|
354
|
+
and self._defense_participation_weight_max > 0
|
|
355
|
+
):
|
|
356
|
+
denom = float(self._defense_participation_weight_max)
|
|
357
|
+
df = df.with_columns(
|
|
358
|
+
(pl.col(cn.defense_participation_weight) / denom)
|
|
359
|
+
.clip(0.0, 1.0)
|
|
360
|
+
.alias(_SCALED_DPW)
|
|
361
|
+
)
|
|
362
|
+
|
|
363
|
+
if (
|
|
364
|
+
cn.projected_defense_participation_weight
|
|
365
|
+
and cn.projected_defense_participation_weight in df.columns
|
|
366
|
+
and self._projected_defense_participation_weight_max is not None
|
|
367
|
+
and self._projected_defense_participation_weight_max > 0
|
|
368
|
+
):
|
|
369
|
+
denom = float(self._projected_defense_participation_weight_max)
|
|
370
|
+
df = df.with_columns(
|
|
371
|
+
(pl.col(cn.projected_defense_participation_weight) / denom)
|
|
372
|
+
.clip(0.0, 1.0)
|
|
373
|
+
.alias(_SCALED_PDPW)
|
|
374
|
+
)
|
|
375
|
+
|
|
312
376
|
return df
|
|
313
377
|
|
|
314
378
|
def _get_participation_weight_col(self) -> str:
|
|
@@ -327,7 +391,9 @@ class PlayerRatingGenerator(RatingGenerator):
|
|
|
327
391
|
|
|
328
392
|
def _remove_internal_scaled_columns(self, df: pl.DataFrame) -> pl.DataFrame:
|
|
329
393
|
"""Remove internal scaled columns before returning."""
|
|
330
|
-
cols_to_drop = [
|
|
394
|
+
cols_to_drop = [
|
|
395
|
+
c for c in [_SCALED_PW, _SCALED_PPW, _SCALED_DPW, _SCALED_PDPW] if c in df.columns
|
|
396
|
+
]
|
|
331
397
|
if cols_to_drop:
|
|
332
398
|
df = df.drop(cols_to_drop)
|
|
333
399
|
return df
|
|
@@ -517,6 +583,7 @@ class PlayerRatingGenerator(RatingGenerator):
|
|
|
517
583
|
)
|
|
518
584
|
|
|
519
585
|
perf_value = pre_player.match_performance.performance_value
|
|
586
|
+
|
|
520
587
|
if perf_value is None:
|
|
521
588
|
off_change = 0.0
|
|
522
589
|
else:
|
|
@@ -541,7 +608,7 @@ class PlayerRatingGenerator(RatingGenerator):
|
|
|
541
608
|
def_change = (
|
|
542
609
|
(def_perf - float(pred_def))
|
|
543
610
|
* mult_def
|
|
544
|
-
* float(pre_player.match_performance.participation_weight)
|
|
611
|
+
* float(pre_player.match_performance.defense_participation_weight)
|
|
545
612
|
)
|
|
546
613
|
|
|
547
614
|
if math.isnan(off_change) or math.isnan(def_change):
|
|
@@ -610,6 +677,7 @@ class PlayerRatingGenerator(RatingGenerator):
|
|
|
610
677
|
)
|
|
611
678
|
|
|
612
679
|
perf_value = pre_player.match_performance.performance_value
|
|
680
|
+
|
|
613
681
|
if perf_value is None:
|
|
614
682
|
off_change = 0.0
|
|
615
683
|
else:
|
|
@@ -634,7 +702,7 @@ class PlayerRatingGenerator(RatingGenerator):
|
|
|
634
702
|
def_change = (
|
|
635
703
|
(def_perf - float(pred_def))
|
|
636
704
|
* mult_def
|
|
637
|
-
* float(pre_player.match_performance.participation_weight)
|
|
705
|
+
* float(pre_player.match_performance.defense_participation_weight)
|
|
638
706
|
)
|
|
639
707
|
|
|
640
708
|
if math.isnan(off_change) or math.isnan(def_change):
|
|
@@ -908,6 +976,19 @@ class PlayerRatingGenerator(RatingGenerator):
|
|
|
908
976
|
if _SCALED_PPW in df.columns:
|
|
909
977
|
player_stat_cols.append(_SCALED_PPW)
|
|
910
978
|
|
|
979
|
+
if cn.defense_participation_weight and cn.defense_participation_weight in df.columns:
|
|
980
|
+
player_stat_cols.append(cn.defense_participation_weight)
|
|
981
|
+
if _SCALED_DPW in df.columns:
|
|
982
|
+
player_stat_cols.append(_SCALED_DPW)
|
|
983
|
+
|
|
984
|
+
if (
|
|
985
|
+
cn.projected_defense_participation_weight
|
|
986
|
+
and cn.projected_defense_participation_weight in df.columns
|
|
987
|
+
):
|
|
988
|
+
player_stat_cols.append(cn.projected_defense_participation_weight)
|
|
989
|
+
if _SCALED_PDPW in df.columns:
|
|
990
|
+
player_stat_cols.append(_SCALED_PDPW)
|
|
991
|
+
|
|
911
992
|
if cn.position and cn.position in df.columns:
|
|
912
993
|
player_stat_cols.append(cn.position)
|
|
913
994
|
|
|
@@ -1027,6 +1108,28 @@ class PlayerRatingGenerator(RatingGenerator):
|
|
|
1027
1108
|
projected_participation_weight = participation_weight
|
|
1028
1109
|
projected_participation_weights.append(projected_participation_weight)
|
|
1029
1110
|
|
|
1111
|
+
# Use scaled defense participation weight if available, otherwise default to participation_weight
|
|
1112
|
+
if _SCALED_DPW in team_player:
|
|
1113
|
+
defense_participation_weight = team_player.get(_SCALED_DPW, participation_weight)
|
|
1114
|
+
elif cn.defense_participation_weight:
|
|
1115
|
+
defense_participation_weight = team_player.get(
|
|
1116
|
+
cn.defense_participation_weight, participation_weight
|
|
1117
|
+
)
|
|
1118
|
+
else:
|
|
1119
|
+
defense_participation_weight = participation_weight
|
|
1120
|
+
|
|
1121
|
+
# Use scaled projected defense participation weight if available
|
|
1122
|
+
if _SCALED_PDPW in team_player:
|
|
1123
|
+
projected_defense_participation_weight = team_player.get(
|
|
1124
|
+
_SCALED_PDPW, defense_participation_weight
|
|
1125
|
+
)
|
|
1126
|
+
elif cn.projected_defense_participation_weight:
|
|
1127
|
+
projected_defense_participation_weight = team_player.get(
|
|
1128
|
+
cn.projected_defense_participation_weight, defense_participation_weight
|
|
1129
|
+
)
|
|
1130
|
+
else:
|
|
1131
|
+
projected_defense_participation_weight = defense_participation_weight
|
|
1132
|
+
|
|
1030
1133
|
perf_val = (
|
|
1031
1134
|
float(team_player[self.performance_column])
|
|
1032
1135
|
if (
|
|
@@ -1047,6 +1150,8 @@ class PlayerRatingGenerator(RatingGenerator):
|
|
|
1047
1150
|
performance_value=perf_val,
|
|
1048
1151
|
projected_participation_weight=projected_participation_weight,
|
|
1049
1152
|
participation_weight=participation_weight,
|
|
1153
|
+
defense_participation_weight=defense_participation_weight,
|
|
1154
|
+
projected_defense_participation_weight=projected_defense_participation_weight,
|
|
1050
1155
|
team_players_playing_time=team_playing_time,
|
|
1051
1156
|
opponent_players_playing_time=opponent_playing_time,
|
|
1052
1157
|
)
|
|
@@ -1282,6 +1387,22 @@ class PlayerRatingGenerator(RatingGenerator):
|
|
|
1282
1387
|
ppw = pw
|
|
1283
1388
|
proj_w.append(float(ppw))
|
|
1284
1389
|
|
|
1390
|
+
# Use scaled defense participation weight if available
|
|
1391
|
+
if _SCALED_DPW in tp:
|
|
1392
|
+
dpw = tp.get(_SCALED_DPW, pw)
|
|
1393
|
+
elif cn.defense_participation_weight:
|
|
1394
|
+
dpw = tp.get(cn.defense_participation_weight, pw)
|
|
1395
|
+
else:
|
|
1396
|
+
dpw = pw
|
|
1397
|
+
|
|
1398
|
+
# Use scaled projected defense participation weight if available
|
|
1399
|
+
if _SCALED_PDPW in tp:
|
|
1400
|
+
pdpw = tp.get(_SCALED_PDPW, dpw)
|
|
1401
|
+
elif cn.projected_defense_participation_weight:
|
|
1402
|
+
pdpw = tp.get(cn.projected_defense_participation_weight, dpw)
|
|
1403
|
+
else:
|
|
1404
|
+
pdpw = dpw
|
|
1405
|
+
|
|
1285
1406
|
team_playing_time = self._get_players_playing_time(
|
|
1286
1407
|
tp, cn.team_players_playing_time
|
|
1287
1408
|
)
|
|
@@ -1293,6 +1414,8 @@ class PlayerRatingGenerator(RatingGenerator):
|
|
|
1293
1414
|
performance_value=get_perf_value(tp),
|
|
1294
1415
|
projected_participation_weight=ppw,
|
|
1295
1416
|
participation_weight=pw,
|
|
1417
|
+
defense_participation_weight=dpw,
|
|
1418
|
+
projected_defense_participation_weight=pdpw,
|
|
1296
1419
|
team_players_playing_time=team_playing_time,
|
|
1297
1420
|
opponent_players_playing_time=opponent_playing_time,
|
|
1298
1421
|
)
|
|
@@ -31,6 +31,7 @@ class PlayerPerformancePredictor(ABC):
|
|
|
31
31
|
pass
|
|
32
32
|
|
|
33
33
|
|
|
34
|
+
|
|
34
35
|
class PlayerRatingNonOpponentPerformancePredictor(PlayerPerformancePredictor):
|
|
35
36
|
|
|
36
37
|
def __init__(
|
|
@@ -38,18 +39,22 @@ class PlayerRatingNonOpponentPerformancePredictor(PlayerPerformancePredictor):
|
|
|
38
39
|
coef: float = 0.0015,
|
|
39
40
|
last_sample_count: int = 1500,
|
|
40
41
|
min_count_for_historical_average: int = 200,
|
|
41
|
-
historical_average_value_default: float = 1000,
|
|
42
42
|
):
|
|
43
43
|
self.coef = coef
|
|
44
44
|
self.last_sample_count = last_sample_count
|
|
45
45
|
self.min_count_for_historical_average = min_count_for_historical_average
|
|
46
|
-
self.historical_average_value_default = historical_average_value_default
|
|
47
46
|
if self.min_count_for_historical_average < 1:
|
|
48
47
|
raise ValueError("min_count_for_historical_average must be positive")
|
|
49
|
-
self.
|
|
48
|
+
self._reference_rating: float | None = None
|
|
50
49
|
|
|
51
50
|
def reset(self):
|
|
52
|
-
|
|
51
|
+
pass
|
|
52
|
+
|
|
53
|
+
def _get_reference_rating(self) -> float:
|
|
54
|
+
"""Get reference rating from rating generator, or default to 1000."""
|
|
55
|
+
if self._reference_rating is not None:
|
|
56
|
+
return self._reference_rating
|
|
57
|
+
return 1000
|
|
53
58
|
|
|
54
59
|
def predict_performance(
|
|
55
60
|
self,
|
|
@@ -57,21 +62,14 @@ class PlayerRatingNonOpponentPerformancePredictor(PlayerPerformancePredictor):
|
|
|
57
62
|
opponent_team_rating: PreMatchTeamRating,
|
|
58
63
|
team_rating: PreMatchTeamRating,
|
|
59
64
|
) -> float:
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
if len(recent_prev_entries_ratings) > self.min_count_for_historical_average:
|
|
63
|
-
historical_average_rating = sum(recent_prev_entries_ratings) / len(
|
|
64
|
-
recent_prev_entries_ratings
|
|
65
|
-
)
|
|
66
|
-
else:
|
|
67
|
-
historical_average_rating = self.historical_average_value_default
|
|
65
|
+
historical_average_rating = self._get_reference_rating()
|
|
66
|
+
|
|
68
67
|
net_mean_rating_over_historical_average = (
|
|
69
68
|
player_rating.rating_value - historical_average_rating
|
|
70
69
|
)
|
|
71
70
|
|
|
72
71
|
value = self.coef * net_mean_rating_over_historical_average
|
|
73
72
|
prediction = (math.exp(value)) / (1 + math.exp(value))
|
|
74
|
-
self._prev_entries_ratings.append(player_rating.rating_value)
|
|
75
73
|
|
|
76
74
|
return prediction
|
|
77
75
|
|
|
@@ -267,6 +267,7 @@ class BaseScorer(ABC):
|
|
|
267
267
|
granularity: list[str] | None = None,
|
|
268
268
|
compare_to_naive: bool = False,
|
|
269
269
|
naive_granularity: list[str] | None = None,
|
|
270
|
+
_name_override: str | None = None,
|
|
270
271
|
):
|
|
271
272
|
"""
|
|
272
273
|
:param target: The column name of the target
|
|
@@ -277,6 +278,9 @@ class BaseScorer(ABC):
|
|
|
277
278
|
:param aggregation_level: The columns to group by before calculating the score (e.g., group from game-player to game-team)
|
|
278
279
|
:param aggregation_method: Aggregation methods for pred/target when aggregation_level is set.
|
|
279
280
|
:param granularity: The columns to calculate separate scores for each unique combination (e.g., different scores for each team)
|
|
281
|
+
:param compare_to_naive: If True, returns naive_score - model_score (improvement over naive baseline)
|
|
282
|
+
:param naive_granularity: Granularity for computing naive baseline predictions
|
|
283
|
+
:param _name_override: Override auto-generated name (internal use)
|
|
280
284
|
"""
|
|
281
285
|
self.target = target
|
|
282
286
|
self.pred_column = pred_column
|
|
@@ -295,6 +299,7 @@ class BaseScorer(ABC):
|
|
|
295
299
|
self.granularity = granularity
|
|
296
300
|
self.compare_to_naive = compare_to_naive
|
|
297
301
|
self.naive_granularity = naive_granularity
|
|
302
|
+
self._name_override = _name_override
|
|
298
303
|
|
|
299
304
|
def _resolve_aggregation_method(self, key: str) -> Any:
|
|
300
305
|
if self.aggregation_method is None:
|
|
@@ -359,6 +364,98 @@ class BaseScorer(ABC):
|
|
|
359
364
|
mask = col_mask if mask is None else (mask & col_mask)
|
|
360
365
|
return df.filter(mask)
|
|
361
366
|
|
|
367
|
+
def _get_scorer_id(self) -> str:
|
|
368
|
+
"""Get scorer-specific identifier in snake_case. Override in subclasses if needed."""
|
|
369
|
+
import re
|
|
370
|
+
name = self.__class__.__name__
|
|
371
|
+
# Check if name is all uppercase (acronym like PWMSE)
|
|
372
|
+
if name.isupper():
|
|
373
|
+
return name.lower()
|
|
374
|
+
# Otherwise use regular snake_case conversion
|
|
375
|
+
return re.sub(r'(?<!^)(?=[A-Z])', '_', name).lower()
|
|
376
|
+
|
|
377
|
+
def _format_column_list(self, columns: list[str], max_display: int = 3) -> str:
|
|
378
|
+
"""Format column list with abbreviation for long lists."""
|
|
379
|
+
if len(columns) <= max_display:
|
|
380
|
+
return "+".join(columns)
|
|
381
|
+
shown = "+".join(columns[:max_display])
|
|
382
|
+
remaining = len(columns) - max_display
|
|
383
|
+
return f"{shown}+{remaining}more"
|
|
384
|
+
|
|
385
|
+
def _sanitize_column_name(self, name: str) -> str:
|
|
386
|
+
"""Replace special characters with underscores."""
|
|
387
|
+
import re
|
|
388
|
+
return re.sub(r'[^a-zA-Z0-9_]', '_', name)
|
|
389
|
+
|
|
390
|
+
def _count_user_filters(self) -> int:
|
|
391
|
+
"""Count filters excluding auto-added validation filter."""
|
|
392
|
+
if not self.filters:
|
|
393
|
+
return 0
|
|
394
|
+
if self.validation_column is None:
|
|
395
|
+
return len(self.filters)
|
|
396
|
+
count = 0
|
|
397
|
+
for f in self.filters:
|
|
398
|
+
if not (f.column_name == self.validation_column and
|
|
399
|
+
f.operator == Operator.EQUALS and
|
|
400
|
+
f.value == 1):
|
|
401
|
+
count += 1
|
|
402
|
+
return count
|
|
403
|
+
|
|
404
|
+
def _generate_name(self) -> str:
|
|
405
|
+
"""Generate readable name from scorer configuration."""
|
|
406
|
+
parts = []
|
|
407
|
+
|
|
408
|
+
parts.append(self._get_scorer_id())
|
|
409
|
+
|
|
410
|
+
parts.append(self._sanitize_column_name(self.target))
|
|
411
|
+
|
|
412
|
+
if self.granularity:
|
|
413
|
+
gran_str = self._format_column_list(self.granularity)
|
|
414
|
+
parts.append(f"gran:{gran_str}")
|
|
415
|
+
|
|
416
|
+
if self.compare_to_naive:
|
|
417
|
+
if self.naive_granularity:
|
|
418
|
+
naive_str = self._format_column_list(self.naive_granularity)
|
|
419
|
+
parts.append(f"naive:{naive_str}")
|
|
420
|
+
else:
|
|
421
|
+
parts.append("naive")
|
|
422
|
+
|
|
423
|
+
if self.aggregation_level:
|
|
424
|
+
agg_str = self._format_column_list(self.aggregation_level)
|
|
425
|
+
parts.append(f"agg:{agg_str}")
|
|
426
|
+
|
|
427
|
+
filter_count = self._count_user_filters()
|
|
428
|
+
if filter_count > 0:
|
|
429
|
+
parts.append(f"filters:{filter_count}")
|
|
430
|
+
|
|
431
|
+
return "_".join(parts)
|
|
432
|
+
|
|
433
|
+
@property
|
|
434
|
+
def name(self) -> str:
|
|
435
|
+
"""
|
|
436
|
+
Generate a human-readable name for this scorer.
|
|
437
|
+
|
|
438
|
+
Returns descriptive name based on scorer configuration including
|
|
439
|
+
target, granularity, naive comparison, aggregation, and filters.
|
|
440
|
+
Only includes components that are actually set (non-None/non-empty).
|
|
441
|
+
|
|
442
|
+
Format: {scorer_id}_{target}[_gran:{cols}][_naive[:cols]][_agg:{cols}][_filters:{n}]
|
|
443
|
+
|
|
444
|
+
Can be overridden by passing _name_override to constructor.
|
|
445
|
+
|
|
446
|
+
Examples:
|
|
447
|
+
>>> scorer = MeanBiasScorer(target="points", pred_column="pred")
|
|
448
|
+
>>> scorer.name
|
|
449
|
+
'mean_bias_scorer_points'
|
|
450
|
+
|
|
451
|
+
>>> scorer = MeanBiasScorer(target="points", granularity=["team_id"], compare_to_naive=True)
|
|
452
|
+
>>> scorer.name
|
|
453
|
+
'mean_bias_scorer_points_gran:team_id_naive'
|
|
454
|
+
"""
|
|
455
|
+
if hasattr(self, '_name_override') and self._name_override is not None:
|
|
456
|
+
return self._name_override
|
|
457
|
+
return self._generate_name()
|
|
458
|
+
|
|
362
459
|
@abstractmethod
|
|
363
460
|
def score(self, df: IntoFrameT) -> float | dict[tuple, float]:
|
|
364
461
|
"""
|
|
@@ -385,6 +482,7 @@ class PWMSE(BaseScorer):
|
|
|
385
482
|
compare_to_naive: bool = False,
|
|
386
483
|
naive_granularity: list[str] | None = None,
|
|
387
484
|
evaluation_labels: list[int] | None = None,
|
|
485
|
+
_name_override: str | None = None,
|
|
388
486
|
):
|
|
389
487
|
self.pred_column_name = pred_column
|
|
390
488
|
super().__init__(
|
|
@@ -397,6 +495,7 @@ class PWMSE(BaseScorer):
|
|
|
397
495
|
validation_column=validation_column,
|
|
398
496
|
compare_to_naive=compare_to_naive,
|
|
399
497
|
naive_granularity=naive_granularity,
|
|
498
|
+
_name_override=_name_override,
|
|
400
499
|
)
|
|
401
500
|
self.labels = labels
|
|
402
501
|
self.evaluation_labels = evaluation_labels
|
|
@@ -553,6 +652,7 @@ class MeanBiasScorer(BaseScorer):
|
|
|
553
652
|
labels: list[int] | None = None,
|
|
554
653
|
compare_to_naive: bool = False,
|
|
555
654
|
naive_granularity: list[str] | None = None,
|
|
655
|
+
_name_override: str | None = None,
|
|
556
656
|
):
|
|
557
657
|
"""
|
|
558
658
|
:param pred_column: The column name of the predictions
|
|
@@ -563,6 +663,7 @@ class MeanBiasScorer(BaseScorer):
|
|
|
563
663
|
:param granularity: The columns to calculate separate scores for each unique combination (e.g., different scores for each team)
|
|
564
664
|
:param filters: The filters to apply before calculating
|
|
565
665
|
:param labels: The labels corresponding to each index in probability distributions (e.g., [-5, -4, ..., 35] for rush yards)
|
|
666
|
+
:param _name_override: Override auto-generated name (internal use)
|
|
566
667
|
"""
|
|
567
668
|
|
|
568
669
|
self.pred_column_name = pred_column
|
|
@@ -577,6 +678,7 @@ class MeanBiasScorer(BaseScorer):
|
|
|
577
678
|
validation_column=validation_column,
|
|
578
679
|
compare_to_naive=compare_to_naive,
|
|
579
680
|
naive_granularity=naive_granularity,
|
|
681
|
+
_name_override=_name_override,
|
|
580
682
|
)
|
|
581
683
|
|
|
582
684
|
def _mean_bias_score(self, df: IntoFrameT) -> float:
|
|
@@ -691,6 +793,7 @@ class SklearnScorer(BaseScorer):
|
|
|
691
793
|
params: dict[str, Any] = None,
|
|
692
794
|
compare_to_naive: bool = False,
|
|
693
795
|
naive_granularity: list[str] | None = None,
|
|
796
|
+
_name_override: str | None = None,
|
|
694
797
|
):
|
|
695
798
|
"""
|
|
696
799
|
:param pred_column: The column name of the predictions
|
|
@@ -701,6 +804,7 @@ class SklearnScorer(BaseScorer):
|
|
|
701
804
|
:param aggregation_level: The columns to group by before calculating the score (e.g., group from game-player to game-team)
|
|
702
805
|
:param granularity: The columns to calculate separate scores for each unique combination (e.g., different scores for each team)
|
|
703
806
|
:param filters: The filters to apply before calculating
|
|
807
|
+
:param _name_override: Override auto-generated name (internal use)
|
|
704
808
|
"""
|
|
705
809
|
|
|
706
810
|
super().__init__(
|
|
@@ -713,11 +817,22 @@ class SklearnScorer(BaseScorer):
|
|
|
713
817
|
validation_column=validation_column,
|
|
714
818
|
compare_to_naive=compare_to_naive,
|
|
715
819
|
naive_granularity=naive_granularity,
|
|
820
|
+
_name_override=_name_override,
|
|
716
821
|
)
|
|
717
822
|
self.pred_column_name = pred_column
|
|
718
823
|
self.scorer_function = scorer_function
|
|
719
824
|
self.params = params or {}
|
|
720
825
|
|
|
826
|
+
def _get_scorer_id(self) -> str:
|
|
827
|
+
"""Use the scorer function name."""
|
|
828
|
+
if hasattr(self.scorer_function, '__name__'):
|
|
829
|
+
name = self.scorer_function.__name__
|
|
830
|
+
# Handle lambda functions
|
|
831
|
+
if name == '<lambda>':
|
|
832
|
+
return "custom_metric"
|
|
833
|
+
return name
|
|
834
|
+
return "custom_metric"
|
|
835
|
+
|
|
721
836
|
def _pad_probabilities(
|
|
722
837
|
self, y_true: list[Any], probabilities: list[list[float]]
|
|
723
838
|
) -> tuple[list[list[float]], dict[str, Any]]:
|
|
@@ -827,6 +942,7 @@ class ProbabilisticMeanBias(BaseScorer):
|
|
|
827
942
|
filters: list[Filter] | None = None,
|
|
828
943
|
compare_to_naive: bool = False,
|
|
829
944
|
naive_granularity: list[str] | None = None,
|
|
945
|
+
_name_override: str | None = None,
|
|
830
946
|
):
|
|
831
947
|
|
|
832
948
|
self.pred_column_name = pred_column
|
|
@@ -841,6 +957,7 @@ class ProbabilisticMeanBias(BaseScorer):
|
|
|
841
957
|
validation_column=validation_column,
|
|
842
958
|
compare_to_naive=compare_to_naive,
|
|
843
959
|
naive_granularity=naive_granularity,
|
|
960
|
+
_name_override=_name_override,
|
|
844
961
|
)
|
|
845
962
|
|
|
846
963
|
def _aggregate_pandas_series(
|
|
@@ -1064,6 +1181,7 @@ class OrdinalLossScorer(BaseScorer):
|
|
|
1064
1181
|
labels: list[int] | None = None,
|
|
1065
1182
|
compare_to_naive: bool = False,
|
|
1066
1183
|
naive_granularity: list[str] | None = None,
|
|
1184
|
+
_name_override: str | None = None,
|
|
1067
1185
|
):
|
|
1068
1186
|
self.pred_column_name = pred_column
|
|
1069
1187
|
super().__init__(
|
|
@@ -1076,6 +1194,7 @@ class OrdinalLossScorer(BaseScorer):
|
|
|
1076
1194
|
validation_column=validation_column,
|
|
1077
1195
|
compare_to_naive=compare_to_naive,
|
|
1078
1196
|
naive_granularity=naive_granularity,
|
|
1197
|
+
_name_override=_name_override,
|
|
1079
1198
|
)
|
|
1080
1199
|
self.classes = classes
|
|
1081
1200
|
|
|
@@ -1263,6 +1382,7 @@ class ThresholdEventScorer(BaseScorer):
|
|
|
1263
1382
|
filters: list["Filter"] | None = None,
|
|
1264
1383
|
compare_to_naive: bool = False,
|
|
1265
1384
|
naive_granularity: list[str] | None = None,
|
|
1385
|
+
_name_override: str | None = None,
|
|
1266
1386
|
):
|
|
1267
1387
|
self.pred_column_name = dist_column
|
|
1268
1388
|
super().__init__(
|
|
@@ -1275,6 +1395,7 @@ class ThresholdEventScorer(BaseScorer):
|
|
|
1275
1395
|
validation_column=validation_column,
|
|
1276
1396
|
compare_to_naive=compare_to_naive,
|
|
1277
1397
|
naive_granularity=naive_granularity,
|
|
1398
|
+
_name_override=_name_override,
|
|
1278
1399
|
)
|
|
1279
1400
|
|
|
1280
1401
|
self.dist_column = dist_column
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: spforge
|
|
3
|
-
Version: 0.8.
|
|
3
|
+
Version: 0.8.30
|
|
4
4
|
Summary: A flexible framework for generating features, ratings, and building machine learning or other models for training and inference on sports data.
|
|
5
5
|
Author-email: Mathias Holmstrøm <mathiasholmstom@gmail.com>
|
|
6
6
|
License: See LICENSE file
|
|
@@ -109,6 +109,7 @@ tests/ratings/test_team_rating_generator.py
|
|
|
109
109
|
tests/ratings/test_utils_scaled_weights.py
|
|
110
110
|
tests/scorer/test_score.py
|
|
111
111
|
tests/scorer/test_score_aggregation_granularity.py
|
|
112
|
+
tests/scorer/test_scorer_name.py
|
|
112
113
|
tests/transformers/test_estimator_transformer_context.py
|
|
113
114
|
tests/transformers/test_net_over_predicted.py
|
|
114
115
|
tests/transformers/test_other_transformer.py
|