spforge 0.8.16__tar.gz → 0.8.17__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of spforge might be problematic.
- {spforge-0.8.16/spforge.egg-info → spforge-0.8.17}/PKG-INFO +1 -1
- {spforge-0.8.16 → spforge-0.8.17}/pyproject.toml +1 -1
- {spforge-0.8.16 → spforge-0.8.17}/spforge/scorer/_score.py +42 -11
- {spforge-0.8.16 → spforge-0.8.17/spforge.egg-info}/PKG-INFO +1 -1
- {spforge-0.8.16 → spforge-0.8.17}/tests/scorer/test_score.py +142 -0
- {spforge-0.8.16 → spforge-0.8.17}/LICENSE +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/MANIFEST.in +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/README.md +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/examples/__init__.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/examples/game_level_example.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/examples/lol/__init__.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/examples/lol/data/__init__.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/examples/lol/data/subsample_lol_data.parquet +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/examples/lol/data/utils.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/examples/lol/pipeline_transformer_example.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/examples/nba/__init__.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/examples/nba/cross_validation_example.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/examples/nba/data/__init__.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/examples/nba/data/game_player_subsample.parquet +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/examples/nba/data/utils.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/examples/nba/feature_engineering_example.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/examples/nba/game_winner_example.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/examples/nba/predictor_transformers_example.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/setup.cfg +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/__init__.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/autopipeline.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/base_feature_generator.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/cross_validator/__init__.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/cross_validator/_base.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/cross_validator/cross_validator.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/data_structures.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/distributions/__init__.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/distributions/_negative_binomial_estimator.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/distributions/_normal_distribution_predictor.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/distributions/_student_t_distribution_estimator.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/estimator/__init__.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/estimator/_conditional_estimator.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/estimator/_frequency_bucketing_classifier.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/estimator/_granularity_estimator.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/estimator/_group_by_estimator.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/estimator/_ordinal_classifier.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/estimator/_sklearn_enhancer_estimator.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/feature_generator/__init__.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/feature_generator/_base.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/feature_generator/_lag.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/feature_generator/_net_over_predicted.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/feature_generator/_regressor_feature_generator.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/feature_generator/_rolling_against_opponent.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/feature_generator/_rolling_mean_binary.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/feature_generator/_rolling_mean_days.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/feature_generator/_rolling_window.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/feature_generator/_utils.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/features_generator_pipeline.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/hyperparameter_tuning/__init__.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/hyperparameter_tuning/_default_search_spaces.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/hyperparameter_tuning/_tuner.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/performance_transformers/__init__.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/performance_transformers/_performance_manager.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/performance_transformers/_performances_transformers.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/ratings/__init__.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/ratings/_base.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/ratings/_player_rating.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/ratings/_team_rating.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/ratings/enums.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/ratings/league_identifier.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/ratings/league_start_rating_optimizer.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/ratings/player_performance_predictor.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/ratings/start_rating_generator.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/ratings/team_performance_predictor.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/ratings/team_start_rating_generator.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/ratings/utils.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/scorer/__init__.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/transformers/__init__.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/transformers/_base.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/transformers/_net_over_predicted.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/transformers/_operator.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/transformers/_other_transformer.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/transformers/_predictor.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/transformers/_simple_transformer.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/transformers/_team_ratio_predictor.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge/utils.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge.egg-info/SOURCES.txt +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge.egg-info/dependency_links.txt +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge.egg-info/requires.txt +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/spforge.egg-info/top_level.txt +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/tests/cross_validator/test_cross_validator.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/tests/distributions/test_distribution.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/tests/end_to_end/test_estimator_hyperparameter_tuning.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/tests/end_to_end/test_league_start_rating_optimizer.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/tests/end_to_end/test_lol_player_kills.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/tests/end_to_end/test_nba_player_points.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/tests/end_to_end/test_nba_player_ratings_hyperparameter_tuning.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/tests/end_to_end/test_nba_prediction_consistency.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/tests/estimator/test_sklearn_estimator.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/tests/feature_generator/test_lag.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/tests/feature_generator/test_regressor_feature_generator.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/tests/feature_generator/test_rolling_against_opponent.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/tests/feature_generator/test_rolling_mean_binary.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/tests/feature_generator/test_rolling_mean_days.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/tests/feature_generator/test_rolling_window.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/tests/hyperparameter_tuning/test_estimator_tuner.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/tests/hyperparameter_tuning/test_rating_tuner.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/tests/performance_transformers/test_performance_manager.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/tests/performance_transformers/test_performances_transformers.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/tests/ratings/test_player_rating_generator.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/tests/ratings/test_player_rating_no_mutation.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/tests/ratings/test_ratings_property.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/tests/ratings/test_team_rating_generator.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/tests/ratings/test_utils_scaled_weights.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/tests/scorer/test_score_aggregation_granularity.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/tests/test_autopipeline.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/tests/test_autopipeline_context.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/tests/test_feature_generator_pipeline.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/tests/transformers/test_estimator_transformer_context.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/tests/transformers/test_net_over_predicted.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/tests/transformers/test_other_transformer.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/tests/transformers/test_predictor_transformer.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/tests/transformers/test_simple_transformer.py +0 -0
- {spforge-0.8.16 → spforge-0.8.17}/tests/transformers/test_team_ratio_predictor.py +0 -0
{spforge-0.8.16/spforge.egg-info → spforge-0.8.17}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: spforge
-Version: 0.8.16
+Version: 0.8.17
 Summary: A flexible framework for generating features, ratings, and building machine learning or other models for training and inference on sports data.
 Author-email: Mathias Holmstrøm <mathiasholmstom@gmail.com>
 License: See LICENSE file

{spforge-0.8.16 → spforge-0.8.17}/pyproject.toml

@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "spforge"
-version = "0.8.16"
+version = "0.8.17"
 description = "A flexible framework for generating features, ratings, and building machine learning or other models for training and inference on sports data."
 readme = "README.md"
 requires-python = ">=3.11"

{spforge-0.8.16 → spforge-0.8.17}/spforge/scorer/_score.py

@@ -366,18 +366,49 @@ class PWMSE(BaseScorer):
         self.labels = labels
         self.evaluation_labels = evaluation_labels
 
+        self._needs_extension = False
+        self._needs_slicing = False
         self._eval_indices: list[int] | None = None
+        self._extension_mapping: dict[int, int] | None = None
+
         if self.evaluation_labels is not None and self.labels is not None:
-
-
+            training_set = set(self.labels)
+            eval_set = set(self.evaluation_labels)
+
+            if eval_set <= training_set:
+                self._needs_slicing = True
+                label_to_idx = {lbl: i for i, lbl in enumerate(self.labels)}
+                self._eval_indices = [label_to_idx[lbl] for lbl in self.evaluation_labels]
+            elif training_set <= eval_set:
+                self._needs_extension = True
+                eval_label_to_idx = {lbl: i for i, lbl in enumerate(self.evaluation_labels)}
+                self._extension_mapping = {
+                    train_idx: eval_label_to_idx[lbl]
+                    for train_idx, lbl in enumerate(self.labels)
+                }
+            else:
+                raise ValueError(
+                    f"evaluation_labels must be a subset or superset of labels. "
+                    f"labels={self.labels}, evaluation_labels={self.evaluation_labels}"
+                )
+
+    def _align_predictions(self, preds: np.ndarray) -> np.ndarray:
+        if self._needs_slicing and self._eval_indices is not None:
+            sliced = preds[:, self._eval_indices]
+            row_sums = sliced.sum(axis=1, keepdims=True)
+            row_sums = np.where(row_sums == 0, 1.0, row_sums)
+            return sliced / row_sums
+
+        if self._needs_extension and self._extension_mapping is not None:
+            n_samples = preds.shape[0]
+            n_eval_labels = len(self.evaluation_labels)
+            extended = np.full((n_samples, n_eval_labels), 1e-5, dtype=np.float64)
+            for train_idx, eval_idx in self._extension_mapping.items():
+                extended[:, eval_idx] = preds[:, train_idx]
+            row_sums = extended.sum(axis=1, keepdims=True)
+            return extended / row_sums
 
-
-            if self._eval_indices is None:
-                return preds
-            sliced = preds[:, self._eval_indices]
-            row_sums = sliced.sum(axis=1, keepdims=True)
-            row_sums = np.where(row_sums == 0, 1.0, row_sums)
-            return sliced / row_sums
+        return preds
 
     def _get_scoring_labels(self) -> list[int]:
         if self.evaluation_labels is not None:
@@ -446,7 +477,7 @@ class PWMSE(BaseScorer):
 
         targets = gran_df[self.target].to_numpy().astype(np.float64)
         preds = np.asarray(gran_df[self.pred_column].to_list(), dtype=np.float64)
-        preds = self.
+        preds = self._align_predictions(preds)
         score = self._pwmse_score(targets, preds)
         if self.compare_to_naive:
             naive_probs_list = _naive_probability_predictions_for_df(
@@ -464,7 +495,7 @@ class PWMSE(BaseScorer):
 
         targets = df[self.target].to_numpy().astype(np.float64)
         preds = np.asarray(df[self.pred_column].to_list(), dtype=np.float64)
-        preds = self.
+        preds = self._align_predictions(preds)
         score = self._pwmse_score(targets, preds)
         if self.compare_to_naive:
             naive_probs_list = _naive_probability_predictions_for_df(

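To make the new alignment behaviour concrete: when `evaluation_labels` is a subset of `labels`, the predicted probabilities are sliced down to the evaluated classes and renormalized; when it is a superset, the missing classes are padded with a 1e-5 floor and the rows renormalized; any partial overlap raises a ValueError. The sketch below is illustration only. `align_predictions` is a free function invented for this example, not part of the spforge API; it just mirrors the behaviour of `PWMSE._align_predictions` shown in the diff above.

```python
# Illustration only: a self-contained sketch of the label-alignment rule that
# PWMSE._align_predictions applies in 0.8.17. align_predictions is a helper
# written for this example, not part of the spforge API.
import numpy as np


def align_predictions(
    preds: np.ndarray, labels: list[int], evaluation_labels: list[int]
) -> np.ndarray:
    train_set, eval_set = set(labels), set(evaluation_labels)
    if eval_set <= train_set:
        # Subset: keep only the evaluated classes, then renormalize each row.
        idx = [labels.index(lbl) for lbl in evaluation_labels]
        sliced = preds[:, idx]
        row_sums = sliced.sum(axis=1, keepdims=True)
        return sliced / np.where(row_sums == 0, 1.0, row_sums)
    if train_set <= eval_set:
        # Superset: unseen classes get a small 1e-5 floor, then rows are renormalized.
        extended = np.full((preds.shape[0], len(evaluation_labels)), 1e-5)
        for train_idx, lbl in enumerate(labels):
            extended[:, evaluation_labels.index(lbl)] = preds[:, train_idx]
        return extended / extended.sum(axis=1, keepdims=True)
    raise ValueError("evaluation_labels must be a subset or superset of labels.")


preds = np.array([[0.3, 0.5, 0.2], [0.2, 0.6, 0.2]])
# Superset case: columns for the unseen labels -1 and 3 are padded and renormalized.
print(align_predictions(preds, labels=[0, 1, 2], evaluation_labels=[-1, 0, 1, 2, 3]))
```
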
{spforge-0.8.16 → spforge-0.8.17/spforge.egg-info}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: spforge
-Version: 0.8.16
+Version: 0.8.17
 Summary: A flexible framework for generating features, ratings, and building machine learning or other models for training and inference on sports data.
 Author-email: Mathias Holmstrøm <mathiasholmstom@gmail.com>
 License: See LICENSE file

{spforge-0.8.16 → spforge-0.8.17}/tests/scorer/test_score.py

@@ -2138,3 +2138,145 @@ def test_scorers_respect_validation_column(scorer_factory, df_factory):
     score_all = scorer_factory().score(df)
     score_valid = scorer_factory().score(df_valid)
     assert score_all == score_valid
+
+
+# ============================================================================
+# PWMSE evaluation_labels Extension Tests
+# ============================================================================
+
+
+@pytest.mark.parametrize("df_type", [pl.DataFrame, pd.DataFrame])
+def test_pwmse__evaluation_labels_extends_predictions(df_type):
+    """PWMSE with evaluation_labels as superset extends predictions with small probs."""
+    df = create_dataframe(
+        df_type,
+        {
+            "pred": [
+                [0.3, 0.5, 0.2],
+                [0.2, 0.6, 0.2],
+            ],
+            "target": [0, 1],
+        },
+    )
+
+    scorer = PWMSE(
+        pred_column="pred",
+        target="target",
+        labels=[0, 1, 2],
+        evaluation_labels=[-1, 0, 1, 2, 3],
+    )
+    score = scorer.score(df)
+
+    n_eval_labels = 5
+    eps = 1e-5
+    preds_original = np.array([[0.3, 0.5, 0.2], [0.2, 0.6, 0.2]])
+    extended = np.full((2, n_eval_labels), eps, dtype=np.float64)
+    extended[:, 1] = preds_original[:, 0]
+    extended[:, 2] = preds_original[:, 1]
+    extended[:, 3] = preds_original[:, 2]
+    row_sums = extended.sum(axis=1, keepdims=True)
+    preds_renorm = extended / row_sums
+
+    eval_labels = np.array([-1, 0, 1, 2, 3], dtype=np.float64)
+    targets = np.array([0, 1], dtype=np.float64)
+    diffs_sqd = (eval_labels[None, :] - targets[:, None]) ** 2
+    expected = float((diffs_sqd * preds_renorm).sum(axis=1).mean())
+
+    assert abs(score - expected) < 1e-10
+
+
+@pytest.mark.parametrize("df_type", [pl.DataFrame, pd.DataFrame])
+def test_pwmse__evaluation_labels_exact_match(df_type):
+    """PWMSE with evaluation_labels identical to labels (no-op)."""
+    df = create_dataframe(
+        df_type,
+        {
+            "pred": [
+                [0.3, 0.5, 0.2],
+                [0.2, 0.6, 0.2],
+            ],
+            "target": [0, 1],
+        },
+    )
+
+    scorer_with_eval = PWMSE(
+        pred_column="pred",
+        target="target",
+        labels=[0, 1, 2],
+        evaluation_labels=[0, 1, 2],
+    )
+    scorer_without_eval = PWMSE(
+        pred_column="pred",
+        target="target",
+        labels=[0, 1, 2],
+    )
+
+    score_with = scorer_with_eval.score(df)
+    score_without = scorer_without_eval.score(df)
+
+    assert abs(score_with - score_without) < 1e-10
+
+
+@pytest.mark.parametrize("df_type", [pl.DataFrame, pd.DataFrame])
+def test_pwmse__evaluation_labels_partial_overlap_raises(df_type):
+    """PWMSE with partial overlap between labels and evaluation_labels raises."""
+    with pytest.raises(ValueError, match="evaluation_labels must be a subset or superset"):
+        PWMSE(
+            pred_column="pred",
+            target="target",
+            labels=[0, 1, 2],
+            evaluation_labels=[1, 2, 3],
+        )
+
+
+@pytest.mark.parametrize("df_type", [pl.DataFrame, pd.DataFrame])
+def test_pwmse__evaluation_labels_extends_with_compare_to_naive(df_type):
+    """PWMSE extension mode works correctly with compare_to_naive."""
+    df = create_dataframe(
+        df_type,
+        {
+            "pred": [
+                [0.8, 0.15, 0.05],
+                [0.1, 0.7, 0.2],
+                [0.05, 0.15, 0.8],
+                [0.3, 0.4, 0.3],
+            ],
+            "target": [0, 1, 2, 1],
+        },
+    )
+
+    scorer = PWMSE(
+        pred_column="pred",
+        target="target",
+        labels=[0, 1, 2],
+        evaluation_labels=[-1, 0, 1, 2, 3],
+        compare_to_naive=True,
+    )
+    score = scorer.score(df)
+
+    n_eval_labels = 5
+    eps = 1e-5
+    preds_original = np.array([
+        [0.8, 0.15, 0.05],
+        [0.1, 0.7, 0.2],
+        [0.05, 0.15, 0.8],
+        [0.3, 0.4, 0.3],
+    ])
+    extended = np.full((4, n_eval_labels), eps, dtype=np.float64)
+    extended[:, 1] = preds_original[:, 0]
+    extended[:, 2] = preds_original[:, 1]
+    extended[:, 3] = preds_original[:, 2]
+    row_sums = extended.sum(axis=1, keepdims=True)
+    preds_renorm = extended / row_sums
+
+    eval_labels = np.array([-1, 0, 1, 2, 3], dtype=np.float64)
+    targets = np.array([0, 1, 2, 1], dtype=np.float64)
+    diffs_sqd = (eval_labels[None, :] - targets[:, None]) ** 2
+    model_score = float((diffs_sqd * preds_renorm).sum(axis=1).mean())
+
+    naive_probs = np.array([0.0, 0.25, 0.5, 0.25, 0.0])
+    naive_preds = np.tile(naive_probs, (4, 1))
+    naive_score = float((diffs_sqd * naive_preds).sum(axis=1).mean())
+
+    expected = naive_score - model_score
+    assert abs(score - expected) < 1e-10

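Read together with the scorer change, the new tests pin down the scoring rule: predictions are first aligned to `evaluation_labels`, then the score is the probability-weighted squared distance between each evaluation label and the target, averaged over rows; with `compare_to_naive=True` the reported value is the naive score minus the model score. A condensed sketch of the arithmetic the first test asserts, assuming `_pwmse_score` computes exactly this expectation (as the tests imply):

```python
# Condensed from test_pwmse__evaluation_labels_extends_predictions (illustration,
# assuming _pwmse_score is the probability-weighted MSE the test reconstructs).
import numpy as np

eval_labels = np.array([-1, 0, 1, 2, 3], dtype=np.float64)
targets = np.array([0, 1], dtype=np.float64)

# Predictions for labels [0, 1, 2], extended to the 5 evaluation labels with a
# 1e-5 floor and renormalized per row (the alignment step shown earlier).
extended = np.full((2, 5), 1e-5)
extended[:, 1:4] = np.array([[0.3, 0.5, 0.2], [0.2, 0.6, 0.2]])
probs = extended / extended.sum(axis=1, keepdims=True)

# Probability-weighted MSE: the expectation of (label - target)^2 under the
# predicted distribution, averaged over rows.
diffs_sqd = (eval_labels[None, :] - targets[:, None]) ** 2
print(float((diffs_sqd * probs).sum(axis=1).mean()))  # ~0.85 for this example
```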