rm-utils 1.0.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- rm_utils-1.0.0/LICENSE +21 -0
- rm_utils-1.0.0/PKG-INFO +74 -0
- rm_utils-1.0.0/docs/PYPI.md +41 -0
- rm_utils-1.0.0/pyproject.toml +46 -0
- rm_utils-1.0.0/rm_utils/assets/logo.png +0 -0
- rm_utils-1.0.0/rm_utils/metrics/__init__.py +1 -0
- rm_utils-1.0.0/rm_utils/metrics/calculator.py +184 -0
- rm_utils-1.0.0/rm_utils/metrics/metric_funcs.py +75 -0
- rm_utils-1.0.0/rm_utils/psi/__init__.py +1 -0
- rm_utils-1.0.0/rm_utils/psi/stability.py +1140 -0
- rm_utils-1.0.0/rm_utils/reports/__init__.py +1 -0
- rm_utils-1.0.0/rm_utils/reports/reporter.py +296 -0
- rm_utils-1.0.0/rm_utils/sql/db_connectors.py +308 -0
rm_utils-1.0.0/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 Nikita Emelyanov
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
rm_utils-1.0.0/PKG-INFO
ADDED
|
@@ -0,0 +1,74 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: rm-utils
|
|
3
|
+
Version: 1.0.0
|
|
4
|
+
Summary:
|
|
5
|
+
License: MIT
|
|
6
|
+
License-File: LICENSE
|
|
7
|
+
Author: n-emelianov
|
|
8
|
+
Author-email: limbolume2023@gmail.com
|
|
9
|
+
Requires-Python: >=3.12, <3.15
|
|
10
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
11
|
+
Classifier: Programming Language :: Python :: 3
|
|
12
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
13
|
+
Classifier: Programming Language :: Python :: 3.13
|
|
14
|
+
Classifier: Programming Language :: Python :: 3.14
|
|
15
|
+
Requires-Dist: clickhouse-connect (>=0.10.0,<0.11.0)
|
|
16
|
+
Requires-Dist: matplotlib (>=3.10.8,<4.0.0)
|
|
17
|
+
Requires-Dist: openpyxl (>=3.1.5,<4.0.0)
|
|
18
|
+
Requires-Dist: pandas (>=3.0.0,<4.0.0)
|
|
19
|
+
Requires-Dist: paramiko (<2.12.0)
|
|
20
|
+
Requires-Dist: pillow (>=12.1.0,<13.0.0)
|
|
21
|
+
Requires-Dist: psycopg2-binary (>=2.9.11,<3.0.0)
|
|
22
|
+
Requires-Dist: scikit-learn (>=1.8.0,<2.0.0)
|
|
23
|
+
Requires-Dist: sqlalchemy (>=2.0.46,<3.0.0)
|
|
24
|
+
Requires-Dist: sqlparse (>=0.5.5,<0.6.0)
|
|
25
|
+
Requires-Dist: sshtunnel (>=0.4.0,<0.5.0)
|
|
26
|
+
Project-URL: Changelog, https://github.com/n-emelyanov/RM_UTILS/blob/master/docs/CHANGELOG.md
|
|
27
|
+
Project-URL: Documentation, https://github.com/n-emelyanov/RM_UTILS#readme
|
|
28
|
+
Project-URL: Homepage, https://github.com/n-emelyanov/RM_UTILS
|
|
29
|
+
Project-URL: Issues, https://github.com/n-emelyanov/RM_UTILS/issues
|
|
30
|
+
Project-URL: Repository, https://github.com/n-emelyanov/RM_UTILS.git
|
|
31
|
+
Description-Content-Type: text/markdown
|
|
32
|
+
|
|
33
|
+
# Библиотека утилит для риск-менеджмента ML-моделей
|
|
34
|
+
|
|
35
|
+

|
|
36
|
+
|
|
37
|
+
Библиотека разработана для помощи в реализации и мониторинге рисковых ML-моделей в финансовой и других риск-ориентированных областях. Включает инструменты для расчета метрик, анализа стабильности, генерации отчетов и работы с данными.
|
|
38
|
+
|
|
39
|
+
## Ключевые возможности
|
|
40
|
+
- Расчет PSI (Population Stability Index) и других метрик стабильности
|
|
41
|
+
- Специализированные метрики для оценки качества ML-моделей в риск-менеджменте
|
|
42
|
+
- Генерация автоматизированных отчетов
|
|
43
|
+
- Утилиты для работы с SQL-запросами
|
|
44
|
+
- Инструменты для обработки и анализа данных
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
## Установка
|
|
48
|
+
```bash
|
|
49
|
+
pip install rm-utils
|
|
50
|
+
```
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
## Использование
|
|
54
|
+
|
|
55
|
+
```python
|
|
56
|
+
from rm_utils.reports import ExcelReporter
|
|
57
|
+
|
|
58
|
+
# Пример создания отчета в Excel
|
|
59
|
+
path = r"/path_to_excel/report.xlsx"
|
|
60
|
+
writer = ExcelReporter(path)
|
|
61
|
+
|
|
62
|
+
# Добавление датафреймов
|
|
63
|
+
writer.add_dataframe(data=your_dataframe, row_offset=4, col_offset=2)
|
|
64
|
+
|
|
65
|
+
# Сохранение отчета
|
|
66
|
+
writer.save()
|
|
67
|
+
```
|
|
68
|
+
Подробные примеры использования смотрите в [examples/usage_examples.ipynb](./examples/usage_examples.ipynb).
|
|
69
|
+
|
|
70
|
+
|
|
71
|
+
## История изменений
|
|
72
|
+
|
|
73
|
+
Все изменения подробно описаны в [CHANGELOG.md](https://github.com/n-emelyanov/RM_UTILS/blob/master/docs/CHANGELOG.md)
|
|
74
|
+
|
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
# Библиотека утилит для риск-менеджмента ML-моделей
|
|
2
|
+
|
|
3
|
+

|
|
4
|
+
|
|
5
|
+
Библиотека разработана для помощи в реализации и мониторинге рисковых ML-моделей в финансовой и других риск-ориентированных областях. Включает инструменты для расчета метрик, анализа стабильности, генерации отчетов и работы с данными.
|
|
6
|
+
|
|
7
|
+
## Ключевые возможности
|
|
8
|
+
- Расчет PSI (Population Stability Index) и других метрик стабильности
|
|
9
|
+
- Специализированные метрики для оценки качества ML-моделей в риск-менеджменте
|
|
10
|
+
- Генерация автоматизированных отчетов
|
|
11
|
+
- Утилиты для работы с SQL-запросами
|
|
12
|
+
- Инструменты для обработки и анализа данных
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
## Установка
|
|
16
|
+
```bash
|
|
17
|
+
pip install rm-utils
|
|
18
|
+
```
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
## Использование
|
|
22
|
+
|
|
23
|
+
```python
|
|
24
|
+
from rm_utils.reports import ExcelReporter
|
|
25
|
+
|
|
26
|
+
# Пример создания отчета в Excel
|
|
27
|
+
path = r"/path_to_excel/report.xlsx"
|
|
28
|
+
writer = ExcelReporter(path)
|
|
29
|
+
|
|
30
|
+
# Добавление датафреймов
|
|
31
|
+
writer.add_dataframe(data=your_dataframe, row_offset=4, col_offset=2)
|
|
32
|
+
|
|
33
|
+
# Сохранение отчета
|
|
34
|
+
writer.save()
|
|
35
|
+
```
|
|
36
|
+
Подробные примеры использования смотрите в [examples/usage_examples.ipynb](./examples/usage_examples.ipynb).
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
## История изменений
|
|
40
|
+
|
|
41
|
+
Все изменения подробно описаны в [CHANGELOG.md](https://github.com/n-emelyanov/RM_UTILS/blob/master/docs/CHANGELOG.md)
|
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
[project]
|
|
2
|
+
name = "rm-utils"
|
|
3
|
+
version = "1.0.0"
|
|
4
|
+
description = ""
|
|
5
|
+
authors = [
|
|
6
|
+
{name = "n-emelianov",email = "limbolume2023@gmail.com"}
|
|
7
|
+
]
|
|
8
|
+
license = {text = "MIT"}
|
|
9
|
+
readme = "docs/PYPI.md"
|
|
10
|
+
requires-python = ">=3.12, <3.15"
|
|
11
|
+
dependencies = [
|
|
12
|
+
"pandas (>=3.0.0,<4.0.0)",
|
|
13
|
+
"openpyxl (>=3.1.5,<4.0.0)",
|
|
14
|
+
"pillow (>=12.1.0,<13.0.0)",
|
|
15
|
+
"scikit-learn (>=1.8.0,<2.0.0)",
|
|
16
|
+
"matplotlib (>=3.10.8,<4.0.0)",
|
|
17
|
+
"sqlparse (>=0.5.5,<0.6.0)",
|
|
18
|
+
"sqlalchemy (>=2.0.46,<3.0.0)",
|
|
19
|
+
"sshtunnel (>=0.4.0,<0.5.0)",
|
|
20
|
+
"clickhouse-connect (>=0.10.0,<0.11.0)",
|
|
21
|
+
"psycopg2-binary (>=2.9.11,<3.0.0)",
|
|
22
|
+
"paramiko (<2.12.0)"
|
|
23
|
+
]
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
[build-system]
|
|
27
|
+
requires = ["poetry-core>=2.0.0,<3.0.0"]
|
|
28
|
+
build-backend = "poetry.core.masonry.api"
|
|
29
|
+
|
|
30
|
+
[dependency-groups]
|
|
31
|
+
dev = [
|
|
32
|
+
"pre-commit (>=4.5.1,<5.0.0)",
|
|
33
|
+
"pytest (>=9.0.2,<10.0.0)",
|
|
34
|
+
"ipykernel (>=7.1.0,<8.0.0)",
|
|
35
|
+
"seaborn (>=0.13.2,<0.14.0)"
|
|
36
|
+
]
|
|
37
|
+
|
|
38
|
+
[project.urls]
|
|
39
|
+
Homepage = "https://github.com/n-emelyanov/RM_UTILS" # Главная страница
|
|
40
|
+
Documentation = "https://github.com/n-emelyanov/RM_UTILS#readme" # Документация
|
|
41
|
+
Repository = "https://github.com/n-emelyanov/RM_UTILS.git" # Исходный код
|
|
42
|
+
Changelog = "https://github.com/n-emelyanov/RM_UTILS/blob/master/docs/CHANGELOG.md" # История изменений
|
|
43
|
+
Issues = "https://github.com/n-emelyanov/RM_UTILS/issues" # Баг-трекер
|
|
44
|
+
|
|
45
|
+
[tool.setuptools]
|
|
46
|
+
packages = ["rm_utils"]
|
|
Binary file
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
from .calculator import MetricCalculator
|
|
@@ -0,0 +1,184 @@
|
|
|
1
|
+
|
|
2
|
+
import itertools
|
|
3
|
+
import pandas as pd
|
|
4
|
+
import numpy as np
|
|
5
|
+
from typing import List
|
|
6
|
+
from IPython.display import display
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
def all_combinations(my_list):
    """Yield every non-empty combination of the items, shortest first."""
    return itertools.chain.from_iterable(
        itertools.combinations(my_list, size) for size in range(1, len(my_list) + 1)
    )
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class MetricCalculator:
|
|
16
|
+
"""Source для расчета метрик по группам.
|
|
17
|
+
|
|
18
|
+
Parameters:
|
|
19
|
+
---
|
|
20
|
+
metr_funcs : dict
|
|
21
|
+
Словарь функций, используемых для расчета метрик по группам.
|
|
22
|
+
Функции должны следовать конвенции sklearn - первые 2 аргумента = y_true, y_pred
|
|
23
|
+
|
|
24
|
+
stats_funcs : dict, default={}
|
|
25
|
+
Словарь функций для расчета метрик по группам.
|
|
26
|
+
В отличие от metr_funcs используется для расчета статистик по таргету или по другим столбцам.
|
|
27
|
+
Функции принимают два аргумента - (y_true, data=None)
|
|
28
|
+
|
|
29
|
+
"""
|
|
30
|
+
|
|
31
|
+
def __init__(
|
|
32
|
+
self,
|
|
33
|
+
metr_funcs: dict,
|
|
34
|
+
funcs_params: dict = {},
|
|
35
|
+
stats_funcs: dict = {}
|
|
36
|
+
):
|
|
37
|
+
self.metr_funcs = metr_funcs
|
|
38
|
+
self.funcs_params = funcs_params
|
|
39
|
+
self.stats_funcs = stats_funcs
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
def _partial_stack(
|
|
43
|
+
self, result: pd.DataFrame, pred_cols: List[str], group_cols: List[str]
|
|
44
|
+
) -> pd.DataFrame:
|
|
45
|
+
"""Преобразует DataFrame с MultiIndex колонками в частично 'stacked' формат."""
|
|
46
|
+
|
|
47
|
+
columns_to_stack = pd.MultiIndex.from_product(
|
|
48
|
+
[pred_cols, self.metr_funcs.keys()]
|
|
49
|
+
)
|
|
50
|
+
|
|
51
|
+
# Стакаем и меняем столбцы
|
|
52
|
+
stacked = (
|
|
53
|
+
result[columns_to_stack]
|
|
54
|
+
.stack(level=0)
|
|
55
|
+
.reindex(self.metr_funcs.keys(), axis=1)
|
|
56
|
+
.reset_index()
|
|
57
|
+
.rename(columns={f"level_{len(group_cols)}": "pred"})
|
|
58
|
+
.set_index(group_cols)
|
|
59
|
+
)
|
|
60
|
+
|
|
61
|
+
kept = result.drop(columns=columns_to_stack)
|
|
62
|
+
kept.columns = kept.columns.get_level_values(1)
|
|
63
|
+
|
|
64
|
+
if len(kept.columns) == 0:
|
|
65
|
+
return stacked.reset_index()
|
|
66
|
+
|
|
67
|
+
# return pd.concat([stacked, kept], axis=1).reset_index()
|
|
68
|
+
return stacked.join(kept).reset_index()
|
|
69
|
+
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
def _set_metr_funcs(self, data: pd.DataFrame, pred_cols: List[str]) -> dict:
|
|
73
|
+
|
|
74
|
+
# Используем pd.Series таргета для ускорения
|
|
75
|
+
true_values = data[self.true_col]
|
|
76
|
+
agg_funcs = {}
|
|
77
|
+
|
|
78
|
+
# Оборачиваем metr_funcs, подставляем параметры если имеются, добавляем в agg_funcs
|
|
79
|
+
for pred_col in pred_cols:
|
|
80
|
+
agg_funcs[pred_col] = [
|
|
81
|
+
(
|
|
82
|
+
func_name,
|
|
83
|
+
lambda x, f=func, params=self.funcs_params.get(func_name, {}): f(
|
|
84
|
+
true_values[x.index], x, **params
|
|
85
|
+
),
|
|
86
|
+
)
|
|
87
|
+
for func_name, func in self.metr_funcs.items()
|
|
88
|
+
]
|
|
89
|
+
|
|
90
|
+
# Оборачиваем stats_funcs и добавляем в agg_funcs
|
|
91
|
+
if self.stats_funcs:
|
|
92
|
+
agg_funcs[self.true_col] = [
|
|
93
|
+
(
|
|
94
|
+
func_name,
|
|
95
|
+
lambda x, func=func: func(true_values[x.index], data.loc[x.index])
|
|
96
|
+
)
|
|
97
|
+
for func_name, func in self.stats_funcs.items()
|
|
98
|
+
]
|
|
99
|
+
|
|
100
|
+
return agg_funcs
|
|
101
|
+
|
|
102
|
+
def calculate(
|
|
103
|
+
self,
|
|
104
|
+
data: pd.DataFrame,
|
|
105
|
+
true_col: str,
|
|
106
|
+
pred_cols: List[str] | str,
|
|
107
|
+
group_cols: List[str] | str,
|
|
108
|
+
groupby_exclude_combinations: list[str] | None = None,
|
|
109
|
+
pretify_one_func: bool = False,
|
|
110
|
+
) -> pd.DataFrame:
|
|
111
|
+
"""Расчет метрик по группам.
|
|
112
|
+
|
|
113
|
+
Parameters
|
|
114
|
+
---
|
|
115
|
+
data : pd.DataFrame
|
|
116
|
+
Датафрейм с данными.
|
|
117
|
+
true_col : str
|
|
118
|
+
Имя столбца с истинными значениями (передается первым аргументом в metr_funcs).
|
|
119
|
+
pred_cols : List[str] | str
|
|
120
|
+
Список имен столбцов с предсказаниями или одно имя столбца.
|
|
121
|
+
group_cols : List[str]
|
|
122
|
+
Список имен столбцов для группировки.
|
|
123
|
+
groupby_exclude_combinations : list[str] or None, default=None
|
|
124
|
+
Список столбцов для дополнитльной агрегации с all.
|
|
125
|
+
pretify_one_func : bool, default=False
|
|
126
|
+
Если True, то возвращает DataFrame с одной функцией в колонке.
|
|
127
|
+
|
|
128
|
+
Returns
|
|
129
|
+
-------
|
|
130
|
+
pd.DataFrame
|
|
131
|
+
Датафрейм с рассчитанными метриками.
|
|
132
|
+
|
|
133
|
+
Examples
|
|
134
|
+
-------
|
|
135
|
+
>>> metr_calc = MetricCalculator(
|
|
136
|
+
... metr_funcs={'mae': mean_absolute_error},
|
|
137
|
+
... stats_funcs={'n_obs': lambda y_true, data: len(data)
|
|
138
|
+
... )
|
|
139
|
+
>>> res = metr_calc.calculate(
|
|
140
|
+
... data=data,
|
|
141
|
+
... true_col='target',
|
|
142
|
+
... pred_cols=['pred1', 'pred2'],
|
|
143
|
+
... group_cols=['group1', 'group2']
|
|
144
|
+
... )
|
|
145
|
+
"""
|
|
146
|
+
self.true_col = true_col
|
|
147
|
+
|
|
148
|
+
# Конвертируем pred_cols/group_cols в список, если это строка
|
|
149
|
+
pred_cols = [pred_cols] if isinstance(pred_cols, str) else pred_cols
|
|
150
|
+
group_cols = [group_cols] if isinstance(group_cols, str) else group_cols
|
|
151
|
+
|
|
152
|
+
agg_funcs = self._set_metr_funcs(data, pred_cols)
|
|
153
|
+
|
|
154
|
+
result = data.groupby(group_cols).agg(agg_funcs)
|
|
155
|
+
|
|
156
|
+
if groupby_exclude_combinations is not None:
|
|
157
|
+
|
|
158
|
+
for idxs in all_combinations(range(len(groupby_exclude_combinations))):
|
|
159
|
+
|
|
160
|
+
# Исключенные группы
|
|
161
|
+
not_groupby_cols = list(
|
|
162
|
+
np.array(groupby_exclude_combinations.copy())[list(idxs)]
|
|
163
|
+
)
|
|
164
|
+
groupby_cols = [
|
|
165
|
+
col for col in group_cols if col not in not_groupby_cols
|
|
166
|
+
]
|
|
167
|
+
|
|
168
|
+
result_temp = data.groupby(groupby_cols).agg(agg_funcs)
|
|
169
|
+
|
|
170
|
+
for _removed in not_groupby_cols:
|
|
171
|
+
result_temp[_removed] = 'all'
|
|
172
|
+
|
|
173
|
+
result_temp = result_temp.reset_index().set_index(group_cols)
|
|
174
|
+
result = pd.concat([result, result_temp], axis=0)
|
|
175
|
+
|
|
176
|
+
# Красивый вывод, если была задействована одна функция
|
|
177
|
+
if pretify_one_func and len(self.metr_funcs) == 1:
|
|
178
|
+
|
|
179
|
+
result.columns = pred_cols + [*(self.stats_funcs or {})]
|
|
180
|
+
return result.reset_index()
|
|
181
|
+
|
|
182
|
+
result = self._partial_stack(result, pred_cols, group_cols)
|
|
183
|
+
|
|
184
|
+
return result
|
|
@@ -0,0 +1,75 @@
|
|
|
1
|
+
from sklearn.metrics import confusion_matrix, mean_squared_error
|
|
2
|
+
from sklearn.metrics import roc_auc_score
|
|
3
|
+
from pandas.api.types import is_numeric_dtype
|
|
4
|
+
import pandas as pd
|
|
5
|
+
import numpy as np
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
def mean_absolute_percentage_error(y_true, y_pred, sample_weight=None):
    """MAPE metric"""
    # Floor |y_true| at machine epsilon to guard against division by zero.
    floor = np.finfo(np.float64).eps
    relative_errors = np.abs(y_pred - y_true) / np.maximum(np.abs(y_true), floor)
    return np.average(relative_errors, weights=sample_weight, axis=0)
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def shortfall(y_true, y_pred):
    """Shortfall metric"""
    predicted_total = np.sum(y_pred)
    actual_total = np.sum(y_true)
    return 1 - predicted_total / actual_total
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
def pearson_corr(y_true, y_pred):
    """Pearson correlation coefficient"""
    # Off-diagonal entry of the 2x2 correlation matrix.
    return np.corrcoef(y_true, y_pred)[0, 1]
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
def pearson_nan_corr(y_true, y_pred):
    """Pearson correlation coefficient ignoring nan values"""
    # Mask NaN/inf so invalid pairs are excluded from the correlation.
    masked_true = np.ma.masked_invalid(y_true)
    masked_pred = np.ma.masked_invalid(y_pred)
    return np.ma.corrcoef(masked_true, masked_pred)[0, 1]
|
|
33
|
+
|
|
34
|
+
def spearman_corr(y_true, y_pred):
    """Spearman rank correlation coefficient, ignoring NaN values.

    Bug fix: the previous body was a copy-paste of the masked Pearson
    correlation (identical to ``pearson_nan_corr``) and never ranked the
    data, so it did not compute Spearman at all. It now computes the actual
    Spearman coefficient; pairs containing NaN in either input are dropped.
    """
    s_true = pd.Series(np.asarray(y_true, dtype=float))
    s_pred = pd.Series(np.asarray(y_pred, dtype=float))
    # pandas ranks the data and drops NaN pairs internally.
    return s_true.corr(s_pred, method="spearman")
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
def root_mse(y_true, y_pred):
    """Root mean squared error.

    Bug fix: the previous version returned the plain MSE — it never took
    the square root despite the name and docstring. Computed directly with
    numpy (equivalent to ``sqrt(mean_squared_error(...))`` with no sample
    weights), which also drops the sklearn dependence for this function.
    """
    y_true = np.asarray(y_true, dtype=float)
    y_pred = np.asarray(y_pred, dtype=float)
    return float(np.sqrt(np.mean((y_true - y_pred) ** 2)))
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
def gini_score(y_true, y_pred):
    """Gini score (2 * ROC AUC - 1)."""
    auc = roc_auc_score(y_true, y_pred)
    return auc * 2 - 1
|
|
50
|
+
|
|
51
|
+
def gini_score_safe(y_true, y_pred):
    """Gini score for unseasoned data.

    Returns 0 when the labels do not contain exactly two classes, so
    ``roc_auc_score`` is never called on degenerate input.
    """
    unique_labels = np.unique(y_true)
    if len(unique_labels) != 2:
        return 0
    return roc_auc_score(y_true, y_pred) * 2 - 1
|
|
57
|
+
|
|
58
|
+
def roc_auc_score_nan(y_true, y_pred):
    """ROC_AUC score for bad/unseasoned data.

    Uses only the positions where both predictions and labels are non-NaN,
    and returns NaN instead of raising on degenerate input (fewer than
    3 valid pairs, or labels that are not exactly two classes).
    """
    valid = ~(np.isnan(y_pred) | np.isnan(y_true))
    y_true, y_pred = y_true[valid], y_pred[valid]
    too_small = len(y_true) < 3
    not_binary = len(np.unique(y_true)) != 2
    if too_small or not_binary:
        return np.nan
    return roc_auc_score(y_true, y_pred)
|
|
68
|
+
|
|
69
|
+
def gini_score_nan(y_true, y_pred):
    """Gini score for bad/unseasoned data.

    Delegates to ``roc_auc_score_nan``: only positions where both
    predictions and labels are non-NaN are used, and NaN is propagated
    instead of raising on degenerate input.
    """
    auc = roc_auc_score_nan(y_true, y_pred)
    return auc * 2 - 1
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
from .stability import StabilityIndexCalculator, psi_plot
|