sarapy 3.0.0__py3-none-any.whl → 3.1.1__py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
sarapy/dataProcessing/OpsProcessor.py CHANGED
@@ -32,8 +32,8 @@ class OpsProcessor():
  self.classifications_probas = None
  plclass_map = {"classifier_file"}
  self._operationsDict = {} ## dictionary of operators with their operations
- self._platin_classifiedOperations = np.array([]) ## array with the operations classified for plantin
- self._fertilizer_classifiedOperations = np.array([]) ## array with the operations classified for plantin
+ # self._platin_classifiedOperations = np.array([]) ## array with the operations classified for plantin
+ # self._fertilizer_classifiedOperations = np.array([]) ## array with the operations classified for plantin
  self._last_row_db = 0 ## indicator of the last row of the data extracted from the historical database

  kwargs_plclass = {}
@@ -343,7 +343,8 @@ if __name__ == "__main__":
  samples = json.load(file)

  op = OpsProcessor(classifier_file='modelos\\pipeline_rf.pkl',
- regresor_file='modelos\\regresor.pkl', poly_features_file='modelos\\poly_features.pkl',
+ # regresor_file='modelos\\regresor.pkl', poly_features_file='modelos\\poly_features.pkl',
+ regresor_file='modelos\\regresor_v2.pkl', poly_features_file='modelos\\poly_features_v2.pkl',
  **kwargs_fmcreator)

  ops_clasificadas = op.processOperations(samples, **kwargs_classifier)
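The hunk above swaps the v1 model artifacts for modelos\regresor_v2.pkl and modelos\poly_features_v2.pkl. Judging from the FertilizerTransformer and checking_regresor.py hunks in this diff, these pickles hold an already-fitted scikit-learn PolynomialFeatures/LinearRegression pair; the following is a minimal standalone sketch of the prediction step they implement, with the file paths and object types assumed from the diff rather than from documented API.

```python
import pickle
import numpy as np

# Paths taken from the diff; both objects are assumed to be already-fitted
# scikit-learn estimators (PolynomialFeatures and LinearRegression).
with open("modelos/poly_features_v2.pkl", "rb") as f:
    poly_features = pickle.load(f)
with open("modelos/regresor_v2.pkl", "rb") as f:
    regresor = pickle.load(f)

scores = np.array([4.0, 9.5, 13.0, 15.0])        # example SC_FT distortion scores
X_poly = poly_features.transform(scores.reshape(-1, 1))
grams = regresor.predict(X_poly).reshape(-1)     # estimated fertilizer grams
print(grams)
```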
sarapy/mlProcessors/FertilizerTransformer.py CHANGED
@@ -48,23 +48,151 @@ class FertilizerTransformer:
  X_poly = self._poly_features.fit_transform(X.reshape(-1, 1))
  self.fertilizer_grams = self._regresor.predict(X_poly)

+ ## for distortion values of 13+-0.3 and 15+-0.3, set the values to 8 and 9 grams, respectively
+ ## use a boolean mask to find the indices
+ mask_13 = (X >= 12.7) & (X <= 13.3)
+ mask_15 = (X >= 14.7) & (X <= 15.3)
+ self.fertilizer_grams[mask_13] = 8
+ self.fertilizer_grams[mask_15] = 9
+
  ## return with shape (n,)
  return self.fertilizer_grams.reshape(-1,)

  if __name__ == "__main__":
  import pandas as pd
+ import numpy as np
  import json
  from sarapy.preprocessing import TransformInputData
+ import matplotlib.pyplot as plt
+ from collections import Counter
+
+ fecha = "2025-08-09"
+ nodo_interes = "UPM095N"

- historical_data_path = "examples/2025-06-21/UPM000N/historical-data.json"
+ historical_data_path = f"examples//{fecha}//{nodo_interes}//historical-data.json"
  with open(historical_data_path, 'r') as file:
  historical_data = json.load(file)

  ## load sarapy\preprocessing\telemetriaDataPosition.json into a dictionary
  data_positions = json.load(open("sarapy/preprocessing/telemetriaDataPosition.json", 'r'))
- transform_input_data = TransformInputData.TransformInputData()
+ transform_input_data = TransformInputData()
  transformed_data = transform_input_data.transform(historical_data)

- fertransformer = FertilizerTransformer(regresor_file='modelos\\regresor.pkl', poly_features_file='modelos\\poly_features.pkl')
+ fertransformer = FertilizerTransformer(regresor_file='modelos\\regresor_v2.pkl', poly_features_file='modelos\\poly_features_v2.pkl')
  gramos = fertransformer.transform(transformed_data)
- print(gramos[:10])
+ print(gramos[:10])
+
+ df = pd.DataFrame(transformed_data)
+ score_ft = df["SC_FT"].values
+
+ print(score_ft.mean(), gramos.mean())
+ print(score_ft.max(), gramos.max())
+ print(score_ft.min(), gramos.min())
+
+ puntos = list(zip(score_ft, gramos))
+ conteos = Counter(puntos)
+ xs, ys, sizes = zip(*[(x, y, c) for (x, y), c in conteos.items()])
+
+ np.array([s*10 for s in sizes]).shape
+
+ points = np.column_stack((score_ft, gramos))
+ unique_points, counts = np.unique(points, axis=0, return_counts=True)
+
+ sizes = np.log1p(counts) * 50
+
+ plt.figure(figsize=(10, 6))
+ handles, labels = plt.gca().get_legend_handles_labels()
+ order = [2, 0, 1]
+ plt.scatter(unique_points[:,0], unique_points[:,1], color="#5612af", label="Regresor 1 - Orden 12",zorder=1,
+ s=sizes)
+ plt.scatter(score_ft.mean(), gramos.mean(), color="#af121f", label="Punto promedio", marker='X',s=400)
+ plt.title(f'Predicciones Regresor 2 de orden 12 para NODO: {nodo_interes}')
+ plt.xlabel('Score de Fertilizante (SC_FT)')
+ plt.ylabel('Predicciones de Gramos de Fertilizante')
+ plt.grid(True)
+ plt.legend()
+ plt.savefig(f'predicciones_regresor2_orden12_{nodo_interes}.png')
+ plt.show()
+
+ nodos = ["UPM075N", "UPM076N", "UPM077N", "UPM078N", "UPM079N", "UPM080N", "UPM081N", "UPM082N", "UPM083N", "UPM084N",
+ "UPM085N", "UPM086N", "UPM087N", "UPM088N", "UPM089N", "UPM090N", "UPM091N", "UPM092N", "UPM093N", "UPM094N", "UPM095N",
+ "UPM096N", "UPM097N", "UPM098N", "UPM099N"]
+
+ ## load example historical data
+
+ scores_ft_maximos = {}
+ scores_ft_minimos = {}
+ gramos_maximos = {}
+ gramos_minimos = {}
+ for nodo in nodos:
+ historical_data_path = f"examples//{fecha}//{nodo}//historical-data.json"
+ try:
+ with open(historical_data_path, 'r') as file:
+ historical_data = json.load(file)
+ except FileNotFoundError:
+ print(f"El archivo {historical_data_path} no se encuentra en el directorio actual.")
+ continue
+ transform_input_data = TransformInputData()
+ transformed_data = transform_input_data.transform(historical_data)
+ fertransformer = FertilizerTransformer(regresor_file='modelos\\regresor_v2.pkl', poly_features_file='modelos\\poly_features_v2.pkl')
+ gramos = fertransformer.transform(transformed_data)
+ gramos_maximos[nodo] = gramos.max()
+ gramos_minimos[nodo] = gramos.min()
+
+ df = pd.DataFrame(transformed_data)
+ score_ft = df["SC_FT"].values
+ scores_ft_maximos[nodo] = score_ft.max()
+ scores_ft_minimos[nodo] = score_ft.min()
+
+ data = np.array([[gramos_maximos[nodo] for nodo in nodos],
+ [scores_ft_maximos[nodo] for nodo in nodos],
+ [gramos_minimos[nodo] for nodo in nodos],
+ [scores_ft_minimos[nodo] for nodo in nodos]])
+
+ data_df = pd.DataFrame(data=data.T, index=nodos, columns=['Gramos_Fertilizante', 'Score_Fertilizante', 'Gramos_Fertilizante_Min', 'Score_Fertilizante_Min'])
+
+ data_df['Gramos_Fertilizante'].plot.bar(figsize=(12, 6), color="#34a853", legend=False)
+ #add text labels on top of each bar with the height value
+ for i, v in enumerate(data_df['Gramos_Fertilizante']):
+ plt.text(i, v + 0.1, f"{v:.1f}", ha='center', va='bottom',color="#34a853")
+ plt.title('Máximos de gramos de fertilizante por nodo')
+ plt.xlabel('Nodos')
+ plt.ylabel('Gramos de Fertilizante')
+ plt.grid(axis='y')
+ plt.savefig('maximos_gramos_fertilizante_por_nodo.png')
+ plt.show()
+
+ data_df['Gramos_Fertilizante_Min'].plot.bar(figsize=(12, 6), color="#34a853", legend=False)
+ #add text labels on top of each bar with the height value
+ for i, v in enumerate(data_df['Gramos_Fertilizante_Min']):
+ plt.text(i, v + 0.1, f"{v:.1f}", ha='center', va='bottom',color="#34a853")
+ plt.title('Mínimos de gramos de fertilizante por nodo')
+ plt.xlabel('Nodos')
+ plt.ylabel('Gramos de Fertilizante')
+ plt.grid(axis='y')
+ plt.savefig('minimos_gramos_fertilizante_por_nodo.png')
+ plt.show()
+
+ data_df['Score_Fertilizante'].plot.bar(figsize=(12, 6), color="#3434a8", legend=False)
+ #add text labels on top of each bar with the height value
+ for i, v in enumerate(data_df['Score_Fertilizante']):
+ plt.text(i, v + 0.1, f"{v:.1f}", ha='center', va='bottom',color="#3434a8")
+ plt.title('Máximos de score de fertilizante por nodo')
+ plt.xlabel('Nodos')
+ plt.ylabel('Score de Fertilizante')
+ plt.grid(axis='y')
+ plt.savefig('maximos_score_fertilizante_por_nodo.png')
+ plt.show()
+
+ data_df['Score_Fertilizante_Min'].plot.bar(figsize=(12, 6), color="#3434a8", legend=False)
+ #add text labels on top of each bar with the height value
+ for i, v in enumerate(data_df['Score_Fertilizante_Min']):
+ plt.text(i, v + 0.1, f"{v:.1f}", ha='center', va='bottom',color="#3434a8")
+ plt.title('Mínimos de score de fertilizante por nodo')
+ plt.xlabel('Nodos')
+ plt.ylabel('Score de Fertilizante')
+ plt.grid(axis='y')
+ plt.savefig('minimos_score_fertilizante_por_nodo.png')
+ plt.show()
+
+
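The key functional change in FertilizerTransformer.transform is the band override applied after the regression step. Below is a standalone NumPy sketch of that masking logic, with the band edges and gram values copied from the hunk above; the helper name is illustrative only.

```python
import numpy as np

def clamp_fertilizer_grams(scores: np.ndarray, grams: np.ndarray) -> np.ndarray:
    """Pin predictions to fixed gram values inside the two distortion bands."""
    grams = grams.copy()
    mask_13 = (scores >= 12.7) & (scores <= 13.3)   # 13 +/- 0.3 -> 8 g
    mask_15 = (scores >= 14.7) & (scores <= 15.3)   # 15 +/- 0.3 -> 9 g
    grams[mask_13] = 8
    grams[mask_15] = 9
    return grams

scores = np.array([12.9, 13.5, 15.0])
preds = np.array([7.6, 8.4, 9.3])
print(clamp_fertilizer_grams(scores, preds))        # -> [8.  8.4 9. ]
```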
sarapy/version.py CHANGED
@@ -1,2 +1,2 @@
  ## Version of the package
- __version__ = "3.0.0"
+ __version__ = "3.1.1"
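After installing the new wheel, the bump can be confirmed by reading the module changed above; a one-line sketch, assuming the package is importable:

```python
from sarapy.version import __version__

print(__version__)  # "3.1.1" for this release
```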
@@ -1,24 +1,47 @@
- Metadata-Version: 2.1
+ Metadata-Version: 2.4
  Name: sarapy
- Version: 3.0.0
- Home-page: https://github.com/lucasbaldezzari/sarapy
- Author: Lucas Baldezzari
+ Version: 3.1.1
+ Summary: Library for Sarapico Metadata processing
  Author-email: Lucas Baldezzari <lmbaldezzari@gmail.com>
- Maintainer-email: Lucas Baldezzari <lmbaldezzari@gmail.com>
- License: For private use only. Owner AMG Servicios profesionales (Mercedes, Uruguay)
+ License: MIT
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Operating System :: Microsoft :: Windows :: Windows 10
+ Classifier: Operating System :: Microsoft :: Windows :: Windows 11
+ Classifier: Operating System :: Unix
+ Requires-Python: >=3.9
  Description-Content-Type: text/markdown
  License-File: LICENCE
- Requires-Dist: numpy
- Requires-Dist: matplotlib
- Requires-Dist: pandas
- Requires-Dist: scipy
- Requires-Dist: scikit-learn
- Requires-Dist: geopy
+ Requires-Dist: numpy>=1.23
+ Requires-Dist: pandas>=1.5
+ Requires-Dist: scipy>=1.9
+ Requires-Dist: scikit-learn>=1.2
+ Requires-Dist: matplotlib>=3.6
+ Requires-Dist: seaborn>=0.12
+ Requires-Dist: requests>=2.28
+ Requires-Dist: python-dotenv>=1.0
+ Requires-Dist: geopy>=2.3
+ Provides-Extra: dev
+ Requires-Dist: pytest; extra == "dev"
+ Requires-Dist: black; extra == "dev"
+ Requires-Dist: ruff; extra == "dev"
+ Requires-Dist: mypy; extra == "dev"
+ Dynamic: license-file

  # SARAPY

  Library for processing SARAPICO project metadata of _AMG SA_.

+ #### Version 3.1.1
+
+ - The regresor_v2 and poly_features_v2 files are generated again because they had been loaded incorrectly.
+
+ #### Version 3.1.0
+
+ - The regressor used to estimate fertilizer is updated.
+ - The files used to install the library are updated.
+
+
  #### Version 3.0.0
  - The way moving-average values are obtained is improved for all variables where it is used.
  - Bugs caused by nodes with few operations are fixed.
@@ -1,14 +1,14 @@
  sarapy/__init__.py,sha256=aVoywqGSscYYDycLaYJnz08dlQabl9gH0h4Q5KtHM9o,74
- sarapy/version.py,sha256=FwfAbPJHI1L9iu0rJTFUu9Rw3s7BA7POU73XKvHfy0E,48
+ sarapy/version.py,sha256=nu21TnYDRA1Ea8izdV-DvsKrdGiTFxIbDKxBSxFh5lo,48
  sarapy/analysis/FeaturesResume.py,sha256=fqKpDy7Py3QHUMtrS8r-KE25ah4HjkJxBKoZtHdORAQ,31946
  sarapy/analysis/__init__.py,sha256=i6QGXmnuA-k6Gh6639TinluogMhLGIiL-tiR_S2i2Ok,74
  sarapy/dataProcessing/GeoProcessor.py,sha256=ARjgKTXDVdf_cFCXyFmzlnmmmay3HG3q-yeJ9QrAcQU,5919
- sarapy/dataProcessing/OpsProcessor.py,sha256=LaWKdVzPfSmFuuQsqalW5cySJ0kQf9wnfEniXUBYyr8,19306
+ sarapy/dataProcessing/OpsProcessor.py,sha256=bK-l5d82-ZTcv169VEM7T7KlFtun24flvcvwXZFe5jw,19431
  sarapy/dataProcessing/TLMSensorDataProcessor.py,sha256=NhRxMoA4SHwyhD61xn6m5UIp1ZrDhEnHaFfhveMJLRQ,3689
  sarapy/dataProcessing/TimeSeriesProcessor.py,sha256=aig3A3_SCa9FVSWxGWiapBUX7Lj9Wi1BVyZi-XXZZYQ,6414
  sarapy/dataProcessing/__init__.py,sha256=Kqs5sFtq6RMEa3KLJFbsGRoYsIxHL1UUGMuplyCyQFk,200
  sarapy/mlProcessors/FertilizerFMCreator.py,sha256=LNi86CI6eVuQ0_UBVJNd_-L79fcY2-zY2NCm9ypl6OM,2354
- sarapy/mlProcessors/FertilizerTransformer.py,sha256=0VkW1Kqnc0LMS1HgaVJvnsyg4MwAG-9ocw047_u7-U8,3119
+ sarapy/mlProcessors/FertilizerTransformer.py,sha256=MTsuplwuRdDMVzycRRYZa98ZOEgRhBcjaDWQg6kyph4,8933
  sarapy/mlProcessors/PlantinClassifier.py,sha256=yNck3R8wGfy6rjb8Q2mxVdu63NWJgJ6UmqUORa2qvbk,12491
  sarapy/mlProcessors/PlantinFMCreator.py,sha256=y8rdkUb-84-ONa4kJOY2R2zAfuOXtUJVBEhUPhDncyY,7852
  sarapy/mlProcessors/__init__.py,sha256=wHnqLn15KRCOYI9WWS8_ArraG_c4UEfDCi19muwjN14,335
@@ -22,8 +22,11 @@ sarapy/stats/stats.py,sha256=eVmi6w9QcwvwuDK3yOr1Z8wQV-1oT3QJujDqWZFYzGc,11424
  sarapy/utils/__init__.py,sha256=TD_-dGgPQBD13hyf2OqDUET0XZOXTduJD1ht8tjZF_0,257
  sarapy/utils/plotting.py,sha256=kX-eYw618urMcUBkNPviQZdBziDc_TR3GInTsO90kU4,4065
  sarapy/utils/utils.py,sha256=NSSeZHeLnQWcFa6vfJ2nVkptX2dIyiCMlZPBmsgEvjo,7106
- sarapy-3.0.0.dist-info/LICENCE,sha256=N00sU3vSQ6F5c2vML9_qP4IFTkCPFFj0YGDB2CZP-uQ,840
- sarapy-3.0.0.dist-info/METADATA,sha256=mNSvlvzBpAm4_9fXTa5EALLidW7rfi6P7tR4bwzRRWc,7684
- sarapy-3.0.0.dist-info/WHEEL,sha256=R0nc6qTxuoLk7ShA2_Y-UWkN8ZdfDBG2B6Eqpz2WXbs,91
- sarapy-3.0.0.dist-info/top_level.txt,sha256=4mUGZXfX2Fw47fpY6MQkaJeuOs_8tbjLkkNp34DJWiA,7
- sarapy-3.0.0.dist-info/RECORD,,
+ sarapy-3.1.1.dist-info/licenses/LICENCE,sha256=N00sU3vSQ6F5c2vML9_qP4IFTkCPFFj0YGDB2CZP-uQ,840
+ test/checking_regresor.py,sha256=H5GZRD2ccy4yqjv9vr9PELpmbZVGY59lc2IDkKs7vTg,5931
+ test/probabilidades_test.py,sha256=4mGwuYNQGQd-LurL63pBpaaw5QHIN1PymBPhzRQB9Hc,3254
+ test/test_import.py,sha256=qYBVkwb3ACzGyYj4cVBNmrPAWw-cuITHgWP4oJYdrto,56
+ sarapy-3.1.1.dist-info/METADATA,sha256=ycUenXbdz48V6M64FL5Y92DS9KQ32qbNiuUqQYh_lIo,8395
+ sarapy-3.1.1.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+ sarapy-3.1.1.dist-info/top_level.txt,sha256=gsDJg1lRhlnLTvKqH039RW-fsHlHgX6ZTxaM6GheziQ,34
+ sarapy-3.1.1.dist-info/RECORD,,
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (72.1.0)
+ Generator: setuptools (80.10.2)
  Root-Is-Purelib: true
  Tag: py3-none-any

@@ -0,0 +1,5 @@
+ docs
+ examples
+ modelos
+ sarapy
+ test
test/checking_regresor.py ADDED
@@ -0,0 +1,162 @@
+ import pickle
+ import pandas as pd
+ import numpy as np
+ import matplotlib.pyplot as plt
+ from sklearn.preprocessing import PolynomialFeatures
+ from sklearn.linear_model import LinearRegression
+ from sklearn.preprocessing import StandardScaler
+
+ regresor = "modelos//regresor.pkl"
+ poly_features = "modelos//poly_features.pkl"
+
+ with open(regresor, "rb") as file:
+ loaded_regresor = pickle.load(file)
+
+ with open(poly_features, "rb") as file:
+ loaded_poly_features = pickle.load(file)
+
+ coeficientes = loaded_regresor.coef_.flatten()
+ intercept = loaded_regresor.intercept_
+ index = np.arange(len(coeficientes)) + 1
+ distorsiones = np.linspace(0, 15, 1000)
+ X_poly = loaded_poly_features.transform(distorsiones.reshape(-1, 1))
+ predicciones = loaded_regresor.predict(X_poly)
+
+ ## generate a dataframe with the coefficients
+ labels = [f"Exponente/Coeficiente {i}" for i in index]
+ tabla = pd.DataFrame(coeficientes, labels).rename(columns={0:"Valores"})
+ tabla.loc[len(tabla)+1] = intercept
+ tabla = tabla.rename(index={len(tabla):"intercepción"})
+
+ # line plot of the coefficients vs their index
+ plt.figure(figsize=(10, 6))
+ plt.plot(index, coeficientes, marker='o', linestyle='-', color='b')
+ plt.title('Coeficientes del Regresor Polinómico')
+ plt.xlabel('Índice del Coeficiente')
+ plt.ylabel('Valor del Coeficiente')
+ plt.grid(True)
+ plt.axhline(0, color='black', linewidth=0.8, linestyle='--')
+ plt.show()
+
+ # scatter plot of distortion vs predictions
+ plt.figure(figsize=(10, 6))
+ plt.scatter(distorsiones, predicciones, color='r', alpha=0.6)
+ plt.title('Score de Fertilizante vs Predicciones en Gramos')
+ plt.xlabel('Score de Fertilizante (SC_FT)')
+ plt.ylabel('Predicciones de Gramos de Fertilizante')
+ plt.grid(True)
+ plt.show()
+
+ inf, sup = 7, 13
+ distorsiones[int(inf/0.1):int(sup/0.1+1)].mean()
+ predicciones[int(inf/0.1):int(sup/0.1+1)].mean()
+
+
+ # Segment definition: (x1, x2, y1, y2)
+ segmentos1 = [(0, 4, 0, 0.821),
+ (4, 7, 0.821, 5),
+ (7, 9.5, 5, 5.41),
+ (9.5, 13, 5.41, 8),
+ (13, 15, 8, 9)]
+ segmentos2 = [(0, 4, 0, 0.821),
+ (4, 7, 0.821, 5),
+ # (7, 9.5, 5, 5.41),
+ (7, 13, 5, 8),
+ (13, 15, 8, 9)]
+
+ def get_line(x1, x2, y1, y2):
+ m = (y2 - y1) / (x2 - x1)
+ b = y1 - m * x1
+ return m, b
+
+ def piecewise_linear(x, segmentos, lines):
+ for (x1, x2, _, _), (m, b) in zip(segmentos, lines):
+ if x1 <= x <= x2:
+ return m * x + b
+ raise ValueError("x fuera de rango")
+
+ lines1 = [get_line(*seg) for seg in segmentos1]
+ lines2 = [get_line(*seg) for seg in segmentos2]
+
+ # Example
+ ys1 = np.array([piecewise_linear(x, segmentos1, lines1) for x in distorsiones])
+ ys2 = np.array([piecewise_linear(x, segmentos2, lines2) for x in distorsiones])
+
+ # scatter plot of distortion vs predictions
+ plt.figure(figsize=(10, 6))
+ handles, labels = plt.gca().get_legend_handles_labels()
+ order = [2, 0, 1]
+ plt.scatter(distorsiones, predicciones, color='r', alpha=0.5, label="Predicciones actuales",zorder=2)
+ plt.scatter(distorsiones, ys1, color='g', alpha=0.5, label="Propuesta 1",zorder=1)
+ plt.scatter(distorsiones, ys2, color='b', alpha=0.5, label="Propuesta 2",zorder=0)
+ plt.title('Score de Fertilizante vs Predicciones en Gramos')
+ plt.xlabel('Score de Fertilizante (SC_FT)')
+ plt.ylabel('Predicciones de Gramos de Fertilizante')
+ plt.grid(True)
+ plt.legend()
+ plt.show()
+
+
+ ### ************* Obtaining new regressors from ys1 and ys2 ************* ###
+ X = distorsiones.reshape(-1, 1)
+
+ poly = PolynomialFeatures(
+ degree=12,
+ include_bias=True # includes the β0 term
+ )
+
+ X_poly = poly.fit_transform(X)
+
+ modelo1 = LinearRegression(fit_intercept=False)
+ modelo1.fit(X_poly, ys1)
+ modelo2 = LinearRegression(fit_intercept=False)
+ modelo2.fit(X_poly, ys2)
+
+ ys1_pred = modelo1.predict(X_poly)
+ ys2_pred = modelo2.predict(X_poly)
+
+ # scatter plot of distortion vs predictions
+ plt.figure(figsize=(10, 6))
+ handles, labels = plt.gca().get_legend_handles_labels()
+ order = [2, 0, 1]
+ plt.scatter(distorsiones, ys1, color='g', alpha=0.1, label="Función 1 Hardcodeada ",zorder=1)
+ plt.scatter(distorsiones, ys1_pred, color="#5612af", alpha=0.5, label="Regresor 1 - Orden 12",zorder=2)
+ plt.title('Comparación Función de Propuesta vs Regresor 1 de orden 12')
+ plt.xlabel('Score de Fertilizante (SC_FT)')
+ plt.ylabel('Predicciones de Gramos de Fertilizante')
+ plt.grid(True)
+ plt.legend()
+ plt.show()
+
+ # scatter plot of distortion vs predictions
+ plt.figure(figsize=(10, 6))
+ handles, labels = plt.gca().get_legend_handles_labels()
+ order = [2, 0, 1]
+ plt.scatter(distorsiones, ys2, color='b', alpha=0.1, label="Función 2 Hardcodeada ",zorder=1)
+ plt.scatter(distorsiones, ys2_pred, color="#12af12", alpha=0.5, label="Regresor 2 - Orden 12",zorder=2)
+ plt.title('Comparación Función de Propuesta vs Regresor 2 de orden 12')
+ plt.xlabel('Score de Fertilizante (SC_FT)')
+ plt.ylabel('Predicciones de Gramos de Fertilizante')
+ plt.grid(True)
+ plt.legend()
+ plt.show()
+
+ # scatter plot of distortion vs predictions
+ plt.figure(figsize=(10, 6))
+ handles, labels = plt.gca().get_legend_handles_labels()
+ order = [2, 0, 1]
+ plt.scatter(distorsiones, ys1_pred, color="#5612af", alpha=0.1, label="Regresor 1 - Orden 12",zorder=1)
+ plt.scatter(distorsiones, ys2_pred, color="#12af12", alpha=0.5, label="Regresor 2 - Orden 12",zorder=2)
+ plt.title('Regresor 1 vs Regresor 2 de orden 12')
+ plt.xlabel('Score de Fertilizante (SC_FT)')
+ plt.ylabel('Predicciones de Gramos de Fertilizante')
+ plt.grid(True)
+ plt.legend()
+ plt.show()
+
+ ## Save model 2 and the polynomial features
+ with open("modelos//regresor_v2.pkl", "wb") as file:
+ pickle.dump(modelo2, file)
+
+ with open("modelos//poly_features_v2.pkl", "wb") as file:
+ pickle.dump(poly, file)
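The script above rebuilds the grams-vs-score curve as piecewise-linear targets and refits them with a degree-12 polynomial; modelo2 is what gets saved as regresor_v2.pkl. The sketch below reproduces the same refit idea more compactly, using np.interp for the proposal 2 targets and adding a max-error check purely for illustration.

```python
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures

# Breakpoints of "Propuesta 2" copied from the script: (score, grams) knots.
xs = np.array([0.0, 4.0, 7.0, 13.0, 15.0])
ys = np.array([0.0, 0.821, 5.0, 8.0, 9.0])

scores = np.linspace(0, 15, 1000)
targets = np.interp(scores, xs, ys)              # piecewise-linear target curve (same as ys2)

poly = PolynomialFeatures(degree=12, include_bias=True)
X_poly = poly.fit_transform(scores.reshape(-1, 1))
model = LinearRegression(fit_intercept=False).fit(X_poly, targets)

max_err = np.max(np.abs(model.predict(X_poly) - targets))
print(f"max |fit - target| on the grid: {max_err:.3f}")
```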
test/probabilidades_test.py ADDED
@@ -0,0 +1,77 @@
+ from sarapy.utils.plotting import plotTemporalData
+ import pandas as pd
+ import numpy as np
+ import os
+ import sarapy.utils.getRawOperations as getRawOperations
+ from sarapy.dataProcessing import OpsProcessor
+ from sarapy.preprocessing import TransformInputData
+ from sarapy.dataProcessing import TLMSensorDataProcessor
+ import sarapy.stats.stats as stats
+
+ tlmsde = TLMSensorDataProcessor.TLMSensorDataProcessor()
+
+ nodo = "UPM017N"
+
+ data_path = os.path.join(os.getcwd(), f"examples\\2025-04-10\\{nodo}\\data.json")
+ historical_data_path = os.path.join(os.getcwd(), f"examples\\2025-04-10\\{nodo}\\historical-data.json")
+
+ raw_data = pd.read_json(data_path, orient="records").to_dict(orient="records")
+ raw_data2 = pd.read_json(historical_data_path, orient="records").to_dict(orient="records")
+
+ transform_input_data = TransformInputData.TransformInputData()
+
+ raw_ops = getRawOperations.getRawOperations(raw_data, raw_data2)
+ datum = transform_input_data.fit_transform(raw_ops)[:,2]
+ telemetria = tlmsde.fit_transform(datum)
+ mode = telemetria[:,tlmsde.dataPositions["MODEFlag"]]
+ dstpt = telemetria[:,tlmsde.dataPositions["DSTRPT"]]
+
+ op = OpsProcessor.OpsProcessor(classifier_file='modelos\\pipeline_rf.pkl', imputeDistances = False,
+ regresor_file='modelos\\regresor.pkl', poly_features_file='modelos\\poly_features.pkl')
+ op.operationsDict
+ data_processed = op.processOperations(raw_ops)
+
+ # convert the list of operations into a dataframe
+ df = pd.DataFrame(data_processed)
+ df["mode"] = mode
+ df["dstpt"] = dstpt
+ df["nodo"] = nodo
+ ma = stats.getMA(df["dstpt"].values, window_size=104, mode='same')
+ df["dstpt_ma"] = ma
+ ## keep only the data where mode==0
+ df = df[df["mode"] == 0]
+
+ # compute the sensor summary
+ resumen = stats.resumen_sensor(df, values_col="dstpt_ma", pctbajo_value=1, pctalto_value=14)
+ print(resumen)
+ # compute the saturation probability
+ prob_saturacion = stats.calcular_prob_saturacion(ma[2000:], saturation_mode="alto", umbrales=(1, 14),
+ alpha=0.2, beta=0.2)
+ print(f"Probabilidad de saturación: {prob_saturacion}")
+
+ # Define the alpha and beta grid
+ alpha_vals = np.linspace(0, 1, 20)
+ beta_vals = np.linspace(0, 1, 20)
+
+ # Create the results matrix
+ Psat_matrix = np.zeros((len(alpha_vals), len(beta_vals)))
+
+ for i, alpha in enumerate(alpha_vals):
+ for j, beta in enumerate(beta_vals):
+ Psat = prob_saturacion = stats.calcular_prob_saturacion(ma, saturation_mode="alto", umbrales=(1, 14),
+ alpha=alpha, beta=beta)
+ Psat_matrix[i, j] = Psat
+
+ import matplotlib.pyplot as plt
+ import seaborn as sns
+
+ plt.figure(figsize=(10, 8))
+ sns.heatmap(Psat_matrix, xticklabels=np.round(beta_vals, 2), yticklabels=np.round(alpha_vals, 2), cmap="coolwarm")
+ plt.title("Evolución de Psat en función de α (vertical) y β (horizontal)")
+ plt.xlabel("β (peso de kurtosis)")
+ plt.ylabel("α (peso de skewness)")
+ plt.show()
+
+ colors = ["#0b3256","#fa0000"]
+ plotTemporalData(df, nodos = [nodo], columnas = ["dstpt_ma"], title = "DSTPT",sameFig = False, show = True,
+ save = False, filename = "dstpt_plot.png", figsize=(15, 10), colors=colors)
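stats.getMA is used above with window_size=104 and mode='same' to smooth the DSTRPT signal before the saturation-probability sweep, and the 3.0.0 changelog describes it as a moving average. The sketch below shows what a plain centered moving average with that interface could look like; this is an assumption about getMA's behaviour, not its actual implementation.

```python
import numpy as np

def moving_average(values: np.ndarray, window_size: int = 104, mode: str = "same") -> np.ndarray:
    """Uniform (boxcar) moving average; a stand-in guess for sarapy.stats.getMA."""
    kernel = np.ones(window_size) / window_size
    return np.convolve(values, kernel, mode=mode)

signal = np.random.default_rng(0).normal(10.0, 2.0, size=1000)
print(moving_average(signal)[:5])
```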
test/test_import.py ADDED
@@ -0,0 +1,5 @@
+ def test_import():
+ import sarapy
+
+
+ test_import()
@@ -1 +0,0 @@
- sarapy