statslibx-0.1.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- statslib/__init__.py +35 -0
- statslib/descriptive.py +579 -0
- statslib/inferential.py +547 -0
- statslib/utils.py +889 -0
- statslibx-0.1.0.dist-info/METADATA +46 -0
- statslibx-0.1.0.dist-info/RECORD +8 -0
- statslibx-0.1.0.dist-info/WHEEL +5 -0
- statslibx-0.1.0.dist-info/top_level.txt +1 -0
statslib/__init__.py
ADDED
@@ -0,0 +1,35 @@
+"""
+StatsLibx - Statistics library for Python
+Author: Emmanuel Ascendra
+Version: 0.1.0
+"""
+
+__version__ = "0.1.0"
+__author__ = "Emmanuel Ascendra"
+
+# Import the main classes
+from .descriptive import DescriptiveStats, DescriptiveSummary, LinearRegressionResult
+from .inferential import InferentialStats, TestResult
+from .utils import UtilsStats
+
+# Define what is exposed by: from statslib import *
+__all__ = [
+    # Main classes
+    'DescriptiveStats',
+    'InferentialStats',
+    'LinearRegressionResult',
+    'DescriptiveSummary',
+    'TestResult',
+    'UtilsStats',
+]
+
+# Welcome message (optional)
+def welcome():
+    """Print information about the library"""
+    print(f"StatsLib v{__version__}")
+    print("Descriptive and inferential statistics library")
+    print(f"Author: {__author__}")
+    print("\nAvailable classes:")
+    print("  - DescriptiveStats: descriptive statistics")
+    print("  - InferentialStats: inferential statistics")
+    print("\nFor more information: help(statslib)")
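A minimal usage sketch of the public API this __init__.py exposes (an illustration, not part of the package; assumes the wheel is installed so the package imports as statslib). Note that every name listed in __all__ must actually be bound in the module namespace, which is why LinearRegressionResult is imported from .descriptive alongside the other classes; otherwise "from statslib import *" would raise an AttributeError.

    # Illustrative only -- assumes the statslibx wheel is installed
    import statslib

    statslib.welcome()  # prints version, author, and available classes

    # Names listed in __all__ are importable directly:
    from statslib import DescriptiveStats, InferentialStats, UtilsStats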
statslib/descriptive.py
ADDED
@@ -0,0 +1,579 @@
+import numpy as np
+import pandas as pd
+from typing import Optional, Union, Literal, List
+from datetime import datetime
+
+class DescriptiveStats:
+    """
+    Class for univariate and multivariate descriptive statistics
+    """
+
+    def __init__(self, data: Union[pd.DataFrame, np.ndarray],
+                 backend: Literal['pandas', 'polars'] = 'pandas'):
+        """
+        Initialize with a DataFrame or numpy array
+
+        Parameters:
+        -----------
+        data : DataFrame or ndarray
+            Data to analyze
+        backend : str
+            'pandas' or 'polars' for processing
+        """
+        if isinstance(data, np.ndarray):
+            if data.ndim == 1:
+                data = pd.DataFrame({'var': data})
+            else:
+                data = pd.DataFrame(data, columns=[f'var_{i}' for i in range(data.shape[1])])
+
+        self.data = data
+        self.backend = backend
+        self._numeric_cols = data.select_dtypes(include=[np.number]).columns.tolist()
+
+    # ============= UNIVARIATE METHODS =============
+
+    def mean(self, column: Optional[str] = None) -> Union[float, pd.Series]:
+        """Arithmetic mean"""
+        if column:
+            return self.data[column].mean()
+        return self.data[self._numeric_cols].mean()
+
+    def median(self, column: Optional[str] = None) -> Union[float, pd.Series]:
+        """Median"""
+        if column:
+            return self.data[column].median()
+        return self.data[self._numeric_cols].median()
+
+    def mode(self, column: Optional[str] = None):
+        """Mode"""
+        if column:
+            return self.data[column].mode()[0]
+        return self.data[self._numeric_cols].mode().iloc[0]
+
+    def variance(self, column: Optional[str] = None) -> Union[float, pd.Series]:
+        """Variance"""
+        if column:
+            return self.data[column].var()
+        return self.data[self._numeric_cols].var()
+
+    def std(self, column: Optional[str] = None) -> Union[float, pd.Series]:
+        """Standard deviation"""
+        if column:
+            return self.data[column].std()
+        return self.data[self._numeric_cols].std()
+
+    def skewness(self, column: Optional[str] = None) -> Union[float, pd.Series]:
+        """Skewness"""
+        if column:
+            return self.data[column].skew()
+        return self.data[self._numeric_cols].skew()
+
+    def kurtosis(self, column: Optional[str] = None) -> Union[float, pd.Series]:
+        """Kurtosis"""
+        if column:
+            return self.data[column].kurtosis()
+        return self.data[self._numeric_cols].kurtosis()
+
+    def quantile(self, q: Union[float, List[float]], column: Optional[str] = None):
+        """Quantiles/percentiles"""
+        if column:
+            return self.data[column].quantile(q)
+        return self.data[self._numeric_cols].quantile(q)
+
+    def outliers(self, column: str, method: Literal['iqr', 'zscore'] = 'iqr',
+                 threshold: float = 1.5) -> pd.Series:
+        """
+        Detect outliers in a column
+
+        Parameters:
+        -----------
+        column : str
+            Column name
+        method : str
+            'iqr' or 'zscore'
+        threshold : float
+            Typically 1.5 for IQR, 3 for z-score
+        """
+        col_data = self.data[column]
+
+        if method == 'iqr':
+            q1 = col_data.quantile(0.25)
+            q3 = col_data.quantile(0.75)
+            iqr = q3 - q1
+            lower_bound = q1 - threshold * iqr
+            upper_bound = q3 + threshold * iqr
+            outliers = (col_data < lower_bound) | (col_data > upper_bound)
+        else:  # zscore
+            z_scores = np.abs((col_data - col_data.mean()) / col_data.std())
+            outliers = z_scores > threshold
+
+        return outliers
+
+    # ============= MULTIVARIATE METHODS =============
+
+    def correlation(self, method: Literal['pearson', 'spearman', 'kendall'] = 'pearson',
+                    columns: Optional[List[str]] = None) -> pd.DataFrame:
+        """
+        Correlation matrix
+
+        Parameters:
+        -----------
+        method : str
+            'pearson', 'spearman' or 'kendall'
+        columns : list, optional
+            List of columns to include
+        """
+        data_subset = self.data[columns] if columns else self.data[self._numeric_cols]
+        return data_subset.corr(method=method)
+
+    def covariance(self, columns: Optional[List[str]] = None) -> pd.DataFrame:
+        """Covariance matrix"""
+        data_subset = self.data[columns] if columns else self.data[self._numeric_cols]
+        return data_subset.cov()
+
+    # ============= SUMMARY METHODS =============
+
+    def summary(self, columns: Optional[List[str]] = None,
+                show_plot: bool = False,
+                plot_backend: str = 'seaborn') -> 'DescriptiveSummary':
+        """
+        Full descriptive-statistics summary
+
+        Parameters:
+        -----------
+        columns : list, optional
+            Specific columns to summarize
+        show_plot : bool
+            Whether to show plots
+        plot_backend : str
+            'seaborn', 'plotly' or 'matplotlib'
+        """
+        cols = columns if columns else self._numeric_cols
+
+        results = {}
+        for col in cols:
+            col_data = self.data[col]
+            results[col] = {
+                'count': col_data.count(),
+                'mean': col_data.mean(),
+                'median': col_data.median(),
+                'mode': col_data.mode()[0] if len(col_data.mode()) > 0 else np.nan,
+                'std': col_data.std(),
+                'variance': col_data.var(),
+                'min': col_data.min(),
+                'q1': col_data.quantile(0.25),
+                'q3': col_data.quantile(0.75),
+                'max': col_data.max(),
+                'iqr': col_data.quantile(0.75) - col_data.quantile(0.25),
+                'skewness': col_data.skew(),
+                'kurtosis': col_data.kurtosis(),
+            }
+
+        return DescriptiveSummary(results, show_plot=show_plot, plot_backend=plot_backend)
+
+    # ============= LINEAR REGRESSION =============
+
+    def linear_regression(self,
+                          y: str,
+                          X: Union[str, List[str]],
+                          engine: Literal['statsmodels', 'scikit-learn'] = 'statsmodels',
+                          fit_intercept: bool = True,
+                          show_plot: bool = False,
+                          plot_backend: str = 'seaborn',
+                          handle_missing: Literal['drop', 'error', 'warn'] = 'drop') -> 'LinearRegressionResult':
+        """
+        Simple or multiple linear regression
+
+        Parameters:
+        -----------
+        y : str
+            Dependent variable
+        X : str or list
+            Independent variable(s)
+        engine : str
+            'statsmodels' or 'scikit-learn'
+        fit_intercept : bool
+            Whether to include an intercept
+        show_plot : bool
+            Show diagnostic plots
+        plot_backend : str
+            Plotting backend
+
+        Returns:
+        --------
+        LinearRegressionResult
+            Object with the results and a summary() method
+        """
+        if isinstance(X, str):
+            X = [X]
+
+        # Check that the columns exist
+        missing_columns = []
+        if y not in self.data.columns:
+            missing_columns.append(y)
+        for x_col in X:
+            if x_col not in self.data.columns:
+                missing_columns.append(x_col)
+
+        if missing_columns:
+            raise ValueError(f"Columns not found: {missing_columns}")
+
+        # Build a DataFrame with only the required columns
+        regression_data = self.data[[y] + X].copy()
+
+        # Handle infinite values
+        numeric_cols = regression_data.select_dtypes(include=[np.number]).columns
+        for col in numeric_cols:
+            if regression_data[col].dtype in [np.float64, np.float32, np.float16]:
+                inf_mask = np.isinf(regression_data[col])
+                if inf_mask.any():
+                    print(f"Warning: column '{col}' has {inf_mask.sum()} infinite values. They will be converted to NaN.")
+                    regression_data[col] = regression_data[col].replace([np.inf, -np.inf], np.nan)
+
+        # Handle missing values
+        missing_before = regression_data.isnull().sum()
+        total_missing = missing_before.sum()
+
+        if total_missing > 0:
+            missing_info = "\n".join([f" - {col}: {missing_before[col]} missing"
+                                      for col in missing_before[missing_before > 0].index])
+
+            if handle_missing == 'error':
+                raise ValueError(f"Data contains missing values:\n{missing_info}")
+
+            elif handle_missing == 'warn':
+                print(f"Warning: data contains {total_missing} missing values:\n{missing_info}")
+                print("Dropping rows with missing values...")
+                regression_data_clean = regression_data.dropna()
+
+            elif handle_missing == 'drop':
+                regression_data_clean = regression_data.dropna()
+
+            else:
+                raise ValueError(f"Unrecognized missing-value handling method: {handle_missing}")
+
+            # Report on the cleanup
+            rows_before = len(regression_data)
+            rows_after = len(regression_data_clean)
+            rows_removed = rows_before - rows_after
+
+            if rows_removed > 0:
+                print(f"Data cleanup: {rows_removed} rows removed ({rows_after} rows remaining)")
+
+            if rows_after < len(X) + 1:  # +1 for the intercept
+                raise ValueError(
+                    f"Too few rows after cleanup: {rows_after}. "
+                    f"At least {len(X) + 1} rows are needed for regression."
+                )
+        else:
+            regression_data_clean = regression_data
+
+        # Extract the cleaned data
+        X_data = regression_data_clean[X].values
+        y_data = regression_data_clean[y].values
+
+        # Validate that the data is numeric
+        if not np.issubdtype(X_data.dtype, np.number):
+            raise ValueError("The independent variables must be numeric")
+        if not np.issubdtype(y_data.dtype, np.number):
+            raise ValueError("The dependent variable must be numeric")
+
+        # Validate that no missing values remain
+        if np.isnan(X_data).any() or np.isnan(y_data).any():
+            raise ValueError("NaN values remain after cleanup")
+
+        # Validate that no infinite values remain
+        if np.isinf(X_data).any() or np.isinf(y_data).any():
+            raise ValueError("Infinite values remain after cleanup")
+
+        # Build and fit the model
+        result = LinearRegressionResult(
+            X_data, y_data, X, y,
+            engine=engine,
+            fit_intercept=fit_intercept
+        )
+        result.fit()
+        result.show_plot = show_plot
+        result.plot_backend = plot_backend
+
+        # Attach cleanup information to the result
+        result.data_info = {
+            'original_rows': len(self.data),
+            'clean_rows': len(regression_data_clean),
+            'rows_removed': len(self.data) - len(regression_data_clean),
+            'missing_handled': total_missing > 0
+        }
+
+        return result
+
+    def help(self):
+        """
+        Print complete help for the DescriptiveStats class
+        """
+        help_text = """
+    📈 DescriptiveStats CLASS - COMPLETE HELP
+
+    Class for univariate and multivariate descriptive statistical analysis
+
+    🔧 MAIN METHODS:
+
+    1. 📊 UNIVARIATE STATISTICS:
+       • .mean(), .median(), .mode()     # Central tendency
+       • .std(), .variance()             # Dispersion
+       • .skewness(), .kurtosis()        # Distribution shape
+       • .quantile(0.25)                 # Quantiles
+       • .outliers('column')             # Outlier detection
+
+    2. 🔗 MULTIVARIATE STATISTICS:
+       • .correlation()                  # Correlation matrix
+       • .covariance()                   # Covariance matrix
+
+    3. 📋 FULL SUMMARY:
+       • .summary()                      # Complete descriptive summary
+       • .summary(show_plot=True)        # With visualizations
+
+    4. 📈 LINEAR REGRESSION:
+       • .linear_regression(y, X)        # Simple/multiple regression
+
+    💡 USAGE EXAMPLES:
+
+    # Initialize
+    stats = DescriptiveStats(my_dataframe)
+
+    # Univariate analysis
+    mean_age = stats.mean('age')
+    summary = stats.summary()
+
+    # Regression
+    model = stats.linear_regression(
+        y='sales',
+        X=['advertising', 'price'],
+        show_plot=True
+    )
+    print(model.summary())
+    """
+        print(help_text)
+
+class DescriptiveSummary:
+    """Class that formats descriptive-statistics output"""
+
+    def __init__(self, results: dict, show_plot: bool = False, plot_backend: str = 'seaborn'):
+        self.results = results
+        self.show_plot = show_plot
+        self.plot_backend = plot_backend
+
+    def __repr__(self):
+        return self._format_output()
+
+    def _format_output(self):
+        """Organized table layout for multiple variables"""
+        output = []
+        output.append("=" * 100)
+        output.append("DESCRIPTIVE STATISTICS SUMMARY".center(100))
+        output.append("=" * 100)
+        output.append(f"Date: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
+        output.append(f"Variables analyzed: {len(self.results)}")
+        output.append("-" * 100)
+
+        for var_name, stats in self.results.items():
+            output.append(f"\n{'VARIABLE: ' + var_name:^100}")
+            output.append("-" * 100)
+
+            # Central tendency
+            output.append("\nMeasures of Central Tendency:")
+            output.append(f"{' Count':<40} {stats['count']:>20.0f}")
+            output.append(f"{' Mean':<40} {stats['mean']:>20.6f}")
+            output.append(f"{' Median':<40} {stats['median']:>20.6f}")
+            output.append(f"{' Mode':<40} {stats['mode']:>20.6f}")
+
+            # Dispersion
+            output.append("\nMeasures of Dispersion:")
+            output.append(f"{' Standard Deviation':<40} {stats['std']:>20.6f}")
+            output.append(f"{' Variance':<40} {stats['variance']:>20.6f}")
+            output.append(f"{' Interquartile Range (IQR)':<40} {stats['iqr']:>20.6f}")
+
+            # Quartiles
+            output.append("\nQuartiles and Range:")
+            output.append(f"{' Minimum':<40} {stats['min']:>20.6f}")
+            output.append(f"{' First Quartile (Q1)':<40} {stats['q1']:>20.6f}")
+            output.append(f"{' Third Quartile (Q3)':<40} {stats['q3']:>20.6f}")
+            output.append(f"{' Maximum':<40} {stats['max']:>20.6f}")
+
+            # Shape
+            output.append("\nDistribution Shape:")
+            output.append(f"{' Skewness':<40} {stats['skewness']:>20.6f}")
+            output.append(f"{' Kurtosis':<40} {stats['kurtosis']:>20.6f}")
+
+            output.append("-" * 100)
+
+        output.append("=" * 100)
+        return "\n".join(output)
+
+
+class LinearRegressionResult:
+    """Class for linear regression results"""
+
+    def __init__(self, X, y, X_names, y_name, engine='statsmodels', fit_intercept=True):
+        self.X = X
+        self.y = y
+        self.X_names = X_names
+        self.y_name = y_name
+        self.engine = engine
+        self.fit_intercept = fit_intercept
+        self.model = None
+        self.results = None
+        self.show_plot = False
+        self.plot_backend = 'seaborn'
+
+        # Attributes populated after fit()
+        self.coef_ = None
+        self.intercept_ = None
+        self.r_squared = None
+        self.adj_r_squared = None
+        self.f_statistic = None
+        self.f_pvalue = None
+        self.aic = None
+        self.bic = None
+        self.residuals = None
+        self.predictions = None
+        self.std_errors = None
+        self.t_values = None
+        self.p_values = None
+
+    def fit(self):
+        """Fit the model"""
+        if self.engine == 'statsmodels':
+            import statsmodels.api as sm
+            X = self.X.copy()
+            if self.fit_intercept:
+                X = sm.add_constant(X)
+            self.model = sm.OLS(self.y, X)
+            self.results = self.model.fit()
+
+            # Extract attributes
+            if self.fit_intercept:
+                self.intercept_ = self.results.params[0]
+                self.coef_ = self.results.params[1:]
+                self.std_errors = self.results.bse[1:]
+                self.t_values = self.results.tvalues[1:]
+                self.p_values = self.results.pvalues[1:]
+            else:
+                self.intercept_ = 0
+                self.coef_ = self.results.params
+                self.std_errors = self.results.bse
+                self.t_values = self.results.tvalues
+                self.p_values = self.results.pvalues
+
+            self.r_squared = self.results.rsquared
+            self.adj_r_squared = self.results.rsquared_adj
+            self.f_statistic = self.results.fvalue
+            self.f_pvalue = self.results.f_pvalue
+            self.aic = self.results.aic
+            self.bic = self.results.bic
+            self.residuals = self.results.resid
+            self.predictions = self.results.fittedvalues
+
+        else:  # scikit-learn
+            from sklearn.linear_model import LinearRegression
+            self.model = LinearRegression(fit_intercept=self.fit_intercept)
+            self.model.fit(self.X, self.y)
+
+            self.coef_ = self.model.coef_
+            self.intercept_ = self.model.intercept_
+            self.r_squared = self.model.score(self.X, self.y)
+            self.predictions = self.model.predict(self.X)
+            self.residuals = self.y - self.predictions
+
+            # Compute additional metrics manually
+            n, k = self.X.shape
+            self.adj_r_squared = 1 - (1 - self.r_squared) * (n - 1) / (n - k - 1)
+
+        return self
+
+    def predict(self, X_new):
+        """Make predictions on new data"""
+        if self.engine == 'statsmodels':
+            import statsmodels.api as sm
+            if self.fit_intercept:
+                X_new = sm.add_constant(X_new)
+            return self.results.predict(X_new)
+        else:
+            return self.model.predict(X_new)
+
+    def summary(self):
+        """Show an OLS-style summary"""
+        return self.__repr__()
+
+    def __repr__(self):
+        return self._format_output()
+
+    def _format_output(self):
+        """statsmodels OLS-style layout"""
+        output = []
+        output.append("=" * 100)
+        output.append("LINEAR REGRESSION RESULTS".center(100))
+        output.append("=" * 100)
+        output.append(f"Dependent Variable: {self.y_name}")
+        output.append(f"Independent Variables: {', '.join(self.X_names)}")
+        output.append(f"Engine: {self.engine}")
+        output.append(f"Date: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
+        output.append("-" * 100)
+
+        # Model information
+        output.append("\nMODEL INFORMATION:")
+        output.append("-" * 100)
+        output.append(f"{'Statistic':<50} {'Value':>20}")
+        output.append("-" * 100)
+        output.append(f"{'R-squared':<50} {self.r_squared:>20.6f}")
+        output.append(f"{'Adjusted R-squared':<50} {self.adj_r_squared:>20.6f}")
+
+        if self.f_statistic is not None:
+            output.append(f"{'F-statistic':<50} {self.f_statistic:>20.6f}")
+            output.append(f"{'Prob (F-statistic)':<50} {self.f_pvalue:>20.6e}")
+
+        if self.aic is not None:
+            output.append(f"{'AIC':<50} {self.aic:>20.6f}")
+            output.append(f"{'BIC':<50} {self.bic:>20.6f}")
+
+        # Coefficients
+        output.append("\nCOEFFICIENTS:")
+        output.append("-" * 100)
+
+        if self.std_errors is not None:
+            output.append(f"{'Variable':<20} {'Coef.':>15} {'Std Err':>15} {'t':>15} {'P>|t|':>15}")
+            output.append("-" * 100)
+            output.append(f"{'const':<20} {self.intercept_:>15.6f} {'-':>15} {'-':>15} {'-':>15}")
+
+            for i, name in enumerate(self.X_names):
+                output.append(
+                    f"{name:<20} {self.coef_[i]:>15.6f} {self.std_errors[i]:>15.6f} "
+                    f"{self.t_values[i]:>15.3f} {self.p_values[i]:>15.6f}"
+                )
+        else:
+            output.append(f"{'Variable':<20} {'Coefficient':>20}")
+            output.append("-" * 100)
+            output.append(f"{'const':<20} {self.intercept_:>20.6f}")
+
+            for i, name in enumerate(self.X_names):
+                output.append(f"{name:<20} {self.coef_[i]:>20.6f}")
+
+        # Residual analysis
+        output.append("\nRESIDUAL ANALYSIS:")
+        output.append("-" * 100)
+        output.append(f"{'Statistic':<50} {'Value':>20}")
+        output.append("-" * 100)
+        output.append(f"{'Residual Mean':<50} {np.mean(self.residuals):>20.6f}")
+        output.append(f"{'Residual Std. Dev.':<50} {np.std(self.residuals):>20.6f}")
+        output.append(f"{'Min Residual':<50} {np.min(self.residuals):>20.6f}")
+        output.append(f"{'Max Residual':<50} {np.max(self.residuals):>20.6f}")
+
+        output.append("=" * 100)
+
+        if self.show_plot:
+            output.append("\n[Diagnostic plots generated]")
+
+        return "\n".join(output)
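A minimal end-to-end sketch of the DescriptiveStats API added in this file (illustrative data and column names, not from the package; assumes numpy, pandas, and statsmodels are installed):

    import numpy as np
    import pandas as pd
    from statslib import DescriptiveStats

    # Illustrative data: one predictor with a linear effect plus noise
    rng = np.random.default_rng(0)
    df = pd.DataFrame({'advertising': rng.uniform(0, 100, 50)})
    df['sales'] = 3.0 * df['advertising'] + rng.normal(0, 10, 50)

    stats = DescriptiveStats(df)
    print(stats.summary())        # formatted DescriptiveSummary table
    mask = stats.outliers('sales', method='iqr', threshold=1.5)
    print(f"{mask.sum()} rows flagged as outliers")

    # statsmodels is the default engine; a str X is promoted to a list
    model = stats.linear_regression(y='sales', X='advertising')
    print(model.summary())        # OLS-style report
    print(model.predict(np.array([[10.0], [20.0]])))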