sarapy 2.2.0__py3-none-any.whl → 3.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sarapy/analysis/FeaturesResume.py +722 -0
- sarapy/analysis/__init__.py +3 -0
- sarapy/dataProcessing/OpsProcessor.py +68 -33
- sarapy/dataProcessing/TLMSensorDataProcessor.py +5 -2
- sarapy/mlProcessors/FertilizerTransformer.py +7 -5
- sarapy/mlProcessors/PlantinClassifier.py +120 -31
- sarapy/mlProcessors/PlantinFMCreator.py +25 -12
- sarapy/mlProcessors/__init__.py +11 -0
- sarapy/preprocessing/TransformInputData.py +3 -2
- sarapy/preprocessing/__init__.py +11 -2
- sarapy/stats/__init__.py +13 -1
- sarapy/stats/stats.py +5 -6
- sarapy/utils/__init__.py +3 -0
- sarapy/utils/utils.py +172 -0
- sarapy/version.py +2 -2
- {sarapy-2.2.0.dist-info → sarapy-3.0.0.dist-info}/METADATA +39 -1
- sarapy-3.0.0.dist-info/RECORD +29 -0
- sarapy/utils/amg_decoder.py +0 -125
- sarapy/utils/amg_ppk.py +0 -38
- sarapy/utils/getRawOperations.py +0 -20
- sarapy-2.2.0.dist-info/RECORD +0 -29
- {sarapy-2.2.0.dist-info → sarapy-3.0.0.dist-info}/LICENCE +0 -0
- {sarapy-2.2.0.dist-info → sarapy-3.0.0.dist-info}/WHEEL +0 -0
- {sarapy-2.2.0.dist-info → sarapy-3.0.0.dist-info}/top_level.txt +0 -0
sarapy/utils/utils.py
ADDED
|
@@ -0,0 +1,172 @@
|
|
|
1
|
+
from typing import List, Tuple
|
|
2
|
+
|
|
3
|
+
import numpy as np
|
|
4
|
+
import pandas as pd
|
|
5
|
+
from pathlib import Path
|
|
6
|
+
from sarapy.analysis.FeaturesResume import FeaturesResume
|
|
7
|
+
|
|
8
|
+
def dataMerging(historical_data, post_processing_data, raw_data, nodoName = None, newColumns = False, asDF = False):
    """
    Merge historical_data, post_processing_data and raw_data into a single
    list of dicts (json-like records).

    If newColumns is False the function replaces the tag_seedling and
    tag_fertilizer values of historical_data with the post-processing ones;
    otherwise it adds two new fields, tag_seedling_classified and
    tag_fertilizer_estimated, to historical_data.

    Args:
        historical_data (list): list of dicts with historical data (json-like).
        post_processing_data (list): list of dicts with post-processing data (json-like).
        raw_data (list): list of dicts providing the raw_tag_seedling and
            raw_tag_fertilizer fields, aligned row-by-row with historical_data.
        nodoName (str|None): name of the node the data belongs to. Default None.
        newColumns (bool): create new columns instead of replacing existing ones.
        asDF (bool): return a pandas DataFrame instead of a list of dicts.

    Returns:
        list[dict] | pd.DataFrame: merged records.

    Raises:
        ValueError: if the input lists do not all have the same length.
    """
    # All three inputs must align positionally, otherwise pandas index
    # alignment would silently fill mismatched rows with NaN.
    if len(historical_data) != len(post_processing_data):
        raise ValueError("Las listas de datos históricos y de post-procesamiento no son del mismo tamaño.")
    if len(raw_data) != len(historical_data):
        raise ValueError("La lista de datos crudos no es del mismo tamaño que los datos históricos.")

    final_data = pd.DataFrame(historical_data)
    post_data = pd.DataFrame(post_processing_data)
    raw_df = pd.DataFrame(raw_data)

    final_data['raw_tag_seedling'] = raw_df['raw_tag_seedling']
    final_data['raw_tag_fertilizer'] = raw_df['raw_tag_fertilizer']

    if not newColumns:
        final_data['tag_seedling'] = post_data['tag_seedling']
        final_data['tag_fertilizer'] = post_data['tag_fertilizer']
    else:
        final_data['tag_seedling_classified'] = post_data['tag_seedling']
        final_data['tag_fertilizer_estimated'] = post_data['tag_fertilizer']

    if nodoName:
        final_data['nodo'] = nodoName

    # Return as a list of dicts (json) unless a DataFrame was requested.
    return final_data if asDF else final_data.to_dict(orient='records')
|
|
49
|
+
|
|
50
|
+
def getOutliersThresholds(data, q1 = 0.25, q3 = 0.75, k = 1.5):
    """Compute outlier limits from the interquartile range.

    data: array with the data
    q1: first quartile
    q3: third quartile
    k: scale factor
    """
    # Evaluate both quantiles in a single call, then derive the IQR.
    lower_q, upper_q = np.quantile(data, [q1, q3])
    spread = upper_q - lower_q

    # Fences at k * IQR beyond each quartile.
    return lower_q - k * spread, upper_q + k * spread
|
|
68
|
+
|
|
69
|
+
|
|
70
|
+
def countingZeros(array: List[int], minimos_seguidos: int = 3) -> List[Tuple[int, int, tuple]]:
    """
    Count consecutive zeros in a binary array (0s and 1s).

    Each result entry (n, k, indexes) means 'n' runs of exactly 'k'
    consecutive zeros were found (only runs with k >= minimos_seguidos are
    kept), with 'indexes' a tuple holding one list of array positions per run.

    Parameters:
        array (List[int]): binary list of 0s and 1s.
        minimos_seguidos (int): minimum run length of zeros to consider.

    Returns:
        List[Tuple[int, int, tuple]]: list of (count, run_length, indexes)
        tuples, sorted by count then run length.
    """
    contador = 0
    resultados = {}
    indexes: List[int] = []

    def _cerrar_racha():
        # Record the just-finished run of zeros (if long enough) and reset
        # the run state. Resetting `indexes` unconditionally is the bug fix:
        # previously, indices from runs shorter than `minimos_seguidos`
        # leaked into the next qualifying run.
        nonlocal contador, indexes
        if contador >= minimos_seguidos:
            if contador in resultados:
                resultados[contador][0] += 1
                resultados[contador][1] += (indexes,)
            else:
                resultados[contador] = [1, (indexes,)]
        contador = 0
        indexes = []

    for i, val in enumerate(array):
        if val == 0:
            contador += 1
            indexes.append(i)
        else:
            _cerrar_racha()

    # In case the sequence ends in zeros.
    _cerrar_racha()

    # Returns [number of occurrences, zero-run length, occurrence indices].
    return sorted([(v[0], k, v[1]) for k, v in resultados.items()])
|
|
112
|
+
|
|
113
|
+
def get_lat_long_from_indices(df: pd.DataFrame, indices: List[List[int]]) -> List[list]:
    """
    Collect node names, latitudes and longitudes for the given row indices.

    Parameters:
        df (pd.DataFrame): DataFrame containing the columns 'latitude',
            'longitude' and 'nodo'.
        indices (List[List[int]]): list of lists of positional row indices.

    Returns:
        List[list]: [nodos, latitudes, longitudes], each flattened in the
        order the indices appear.
    """
    # NOTE: the original annotation claimed Tuple[float, float]; the function
    # has always returned three parallel lists, so the signature now says so.
    latitudes = []
    longitudes = []
    nodos = []
    for index_list in indices:
        for index in index_list:
            row = df.iloc[index]  # fetch the row once instead of three lookups
            latitudes.append(row["latitude"])
            longitudes.append(row["longitude"])
            nodos.append(row["nodo"])
    return [nodos, latitudes, longitudes]
|
|
133
|
+
|
|
134
|
+
|
|
135
|
+
def readingFolders(raiz: str | Path, ignorar_ocultas: bool = True, ordenar: bool = True) -> list[str]:
|
|
136
|
+
raiz = Path(raiz)
|
|
137
|
+
if not raiz.is_dir():
|
|
138
|
+
raise NotADirectoryError(f"La ruta no es una carpeta: {raiz}")
|
|
139
|
+
|
|
140
|
+
nombres = [p.name for p in raiz.iterdir() if p.is_dir()]
|
|
141
|
+
if ignorar_ocultas:
|
|
142
|
+
nombres = [n for n in nombres if not n.startswith(".")]
|
|
143
|
+
if ordenar:
|
|
144
|
+
nombres.sort()
|
|
145
|
+
return nombres
|
|
146
|
+
|
|
147
|
+
def computar_resumenes_por_filtro(nodos_ok, merged_cache, filtro, outliers):
    """
    Compute per-node summaries filtered by a specific criterion.

    Args (inferred from usage — confirm against FeaturesResume):
        nodos_ok: iterable of node identifiers to process.
        merged_cache: mapping node -> merged data accepted by FeaturesResume.
        filtro: filter criterion forwarded as FeaturesResume(filtrar=...).
        outliers: argument forwarded to FeaturesResume.removeOutliers().

    Returns:
        tuple: (conteos, resumenes) — per-node normalized tag_seedling value
        counts and per-node getResume(to="all") summaries.
    """
    conteos, resumenes, dstp_ptmas, delta_dcdp, time_ac = {}, {}, {}, {}, {}
    for nodo in nodos_ok:
        # Build the per-node resume after outlier removal.
        fr = FeaturesResume(merged_cache[nodo], info=nodo, filtrar=filtro)
        fr.removeOutliers(outliers)
        conteos[nodo] = fr.data["tag_seedling"].value_counts(normalize=True)
        resumenes[nodo] = fr.getResume(to="all")
        # NOTE(review): dstp_ptmas, delta_dcdp and time_ac are populated below
        # but never returned — dead work unless these calls are needed for
        # side effects inside FeaturesResume; confirm and either return them
        # or drop the three assignments.
        dstp_ptmas[nodo] = fr.getSensorMA()
        delta_dcdp[nodo] = fr.data["ratio_dCdP"]
        time_ac[nodo] = fr.data["time_ac"]
    return conteos, resumenes
|
|
161
|
+
|
|
162
|
+
def metricas_desde_resumenes(nodos_ok, resumenes, stats):
    """Return a dict metric_name -> numpy vector in the order of nodos_ok."""
    metric_keys = ("time_ac", "deltaO", "ratio_dCdP",
                   "precision", "distances", "dst_pt")
    salida = {"nodo": list(nodos_ok)}
    for key in metric_keys:
        # One vector per metric, extracting the requested statistic per node.
        salida[key] = np.array([resumenes[n][key][stats] for n in nodos_ok])
    return salida
|
sarapy/version.py
CHANGED
|
@@ -1,2 +1,2 @@
|
|
|
1
|
-
## Version of the package
|
|
2
|
-
__version__ = "
|
|
1
|
+
## Version of the package
|
|
2
|
+
__version__ = "3.0.0"
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.1
|
|
2
2
|
Name: sarapy
|
|
3
|
-
Version:
|
|
3
|
+
Version: 3.0.0
|
|
4
4
|
Home-page: https://github.com/lucasbaldezzari/sarapy
|
|
5
5
|
Author: Lucas Baldezzari
|
|
6
6
|
Author-email: Lucas Baldezzari <lmbaldezzari@gmail.com>
|
|
@@ -19,6 +19,44 @@ Requires-Dist: geopy
|
|
|
19
19
|
|
|
20
20
|
Library for processing SARAPICO project metadata of _AMG SA_.
|
|
21
21
|
|
|
22
|
+
#### Version 3.0.0
|
|
23
|
+
- Se mejora la forma de obtener valores de media movil para todas las variables en las que se usa.
|
|
24
|
+
- Se corrigen bugs debido a nodos con pocas operaciones.
|
|
25
|
+
- Se corrigen errores a la hora de pasar parámetros a los métodos de algunas clases.
|
|
26
|
+
- Se configuran parámetros de fmcreator y plantin_classifier para el reetiquetado, los mismos son:
|
|
27
|
+
|
|
28
|
+
kwargs_fmcreator = {"imputeDistances":True, "distanciaMedia":1.8, "umbral_precision":0.3,
|
|
29
|
+
"dist_mismo_lugar":0.2, "max_dist":100,
|
|
30
|
+
"umbral_ratio_dCdP":2, "deltaO_medio":4,
|
|
31
|
+
"impute_ratiodcdp": True, "umbral_impute_ratiodcdp": -0.5,
|
|
32
|
+
"deltaO_ma": True, "deltaO_ma_window": 26}
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
##argumentos del método PlantinClassifier.clasiffy()
|
|
36
|
+
kwargs_classifier = {"proba_threshold":0.4,
|
|
37
|
+
"use_proba_ma":False,
|
|
38
|
+
"proba_ma_window":10,
|
|
39
|
+
"update_samePlace":True,
|
|
40
|
+
"update_dstpt":True,
|
|
41
|
+
"umbral_proba_dstpt":0.5,
|
|
42
|
+
"umbral_bajo_dstpt":1.5,
|
|
43
|
+
"use_ma":True,
|
|
44
|
+
"dstpt_ma_window":62,
|
|
45
|
+
"use_min_dstpt":False,
|
|
46
|
+
"factor":0.1,
|
|
47
|
+
|
|
48
|
+
"useRatioStats":False,
|
|
49
|
+
"std_weight":1.,
|
|
50
|
+
"useDistancesStats":False,
|
|
51
|
+
"ratio_dcdp_umbral":0.1,
|
|
52
|
+
"dist_umbral":0.5,
|
|
53
|
+
}
|
|
54
|
+
|
|
55
|
+
#### Version 2.3.0
|
|
56
|
+
|
|
57
|
+
- Se agregan funcionalidades.
|
|
58
|
+
- Se corrigen errores menores.
|
|
59
|
+
|
|
22
60
|
#### Version 2.2.0
|
|
23
61
|
|
|
24
62
|
- Se agrega baseDeltaP en PlantinFMCreator para poder dividir el timestamp de la electrónica por 10, ya que se envía en décimas de segundo.
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
sarapy/__init__.py,sha256=aVoywqGSscYYDycLaYJnz08dlQabl9gH0h4Q5KtHM9o,74
|
|
2
|
+
sarapy/version.py,sha256=FwfAbPJHI1L9iu0rJTFUu9Rw3s7BA7POU73XKvHfy0E,48
|
|
3
|
+
sarapy/analysis/FeaturesResume.py,sha256=fqKpDy7Py3QHUMtrS8r-KE25ah4HjkJxBKoZtHdORAQ,31946
|
|
4
|
+
sarapy/analysis/__init__.py,sha256=i6QGXmnuA-k6Gh6639TinluogMhLGIiL-tiR_S2i2Ok,74
|
|
5
|
+
sarapy/dataProcessing/GeoProcessor.py,sha256=ARjgKTXDVdf_cFCXyFmzlnmmmay3HG3q-yeJ9QrAcQU,5919
|
|
6
|
+
sarapy/dataProcessing/OpsProcessor.py,sha256=LaWKdVzPfSmFuuQsqalW5cySJ0kQf9wnfEniXUBYyr8,19306
|
|
7
|
+
sarapy/dataProcessing/TLMSensorDataProcessor.py,sha256=NhRxMoA4SHwyhD61xn6m5UIp1ZrDhEnHaFfhveMJLRQ,3689
|
|
8
|
+
sarapy/dataProcessing/TimeSeriesProcessor.py,sha256=aig3A3_SCa9FVSWxGWiapBUX7Lj9Wi1BVyZi-XXZZYQ,6414
|
|
9
|
+
sarapy/dataProcessing/__init__.py,sha256=Kqs5sFtq6RMEa3KLJFbsGRoYsIxHL1UUGMuplyCyQFk,200
|
|
10
|
+
sarapy/mlProcessors/FertilizerFMCreator.py,sha256=LNi86CI6eVuQ0_UBVJNd_-L79fcY2-zY2NCm9ypl6OM,2354
|
|
11
|
+
sarapy/mlProcessors/FertilizerTransformer.py,sha256=0VkW1Kqnc0LMS1HgaVJvnsyg4MwAG-9ocw047_u7-U8,3119
|
|
12
|
+
sarapy/mlProcessors/PlantinClassifier.py,sha256=yNck3R8wGfy6rjb8Q2mxVdu63NWJgJ6UmqUORa2qvbk,12491
|
|
13
|
+
sarapy/mlProcessors/PlantinFMCreator.py,sha256=y8rdkUb-84-ONa4kJOY2R2zAfuOXtUJVBEhUPhDncyY,7852
|
|
14
|
+
sarapy/mlProcessors/__init__.py,sha256=wHnqLn15KRCOYI9WWS8_ArraG_c4UEfDCi19muwjN14,335
|
|
15
|
+
sarapy/preprocessing/DistancesImputer.py,sha256=NvbVAh5m0yFxVgDbEFnEX7RSG13qLjO7i2gqjDAWsf4,9106
|
|
16
|
+
sarapy/preprocessing/FertilizerImputer.py,sha256=zK6ONAilwPHvj-bC7yxnQYOkDBCCkWh6__57vYK9anM,1490
|
|
17
|
+
sarapy/preprocessing/TransformInputData.py,sha256=gT0S_ANSmSODPru4DVK7qpA7ZqnRoPwNyLkV-VJWvAU,8584
|
|
18
|
+
sarapy/preprocessing/TransformToOutputData.py,sha256=2hSeFkrSt1OO_jiX4SQJtL3Dhm_9xLy7zCgkj8jo9OE,3137
|
|
19
|
+
sarapy/preprocessing/__init__.py,sha256=2if1rcq8WCk8u4M3bHcE_tY2hLmZxwNG4qdLNJR1Ixg,331
|
|
20
|
+
sarapy/stats/__init__.py,sha256=X4IZsG2TxZUtXYmONvVJymHInnLHMqiThmW6U2ZMd8U,258
|
|
21
|
+
sarapy/stats/stats.py,sha256=eVmi6w9QcwvwuDK3yOr1Z8wQV-1oT3QJujDqWZFYzGc,11424
|
|
22
|
+
sarapy/utils/__init__.py,sha256=TD_-dGgPQBD13hyf2OqDUET0XZOXTduJD1ht8tjZF_0,257
|
|
23
|
+
sarapy/utils/plotting.py,sha256=kX-eYw618urMcUBkNPviQZdBziDc_TR3GInTsO90kU4,4065
|
|
24
|
+
sarapy/utils/utils.py,sha256=NSSeZHeLnQWcFa6vfJ2nVkptX2dIyiCMlZPBmsgEvjo,7106
|
|
25
|
+
sarapy-3.0.0.dist-info/LICENCE,sha256=N00sU3vSQ6F5c2vML9_qP4IFTkCPFFj0YGDB2CZP-uQ,840
|
|
26
|
+
sarapy-3.0.0.dist-info/METADATA,sha256=mNSvlvzBpAm4_9fXTa5EALLidW7rfi6P7tR4bwzRRWc,7684
|
|
27
|
+
sarapy-3.0.0.dist-info/WHEEL,sha256=R0nc6qTxuoLk7ShA2_Y-UWkN8ZdfDBG2B6Eqpz2WXbs,91
|
|
28
|
+
sarapy-3.0.0.dist-info/top_level.txt,sha256=4mUGZXfX2Fw47fpY6MQkaJeuOs_8tbjLkkNp34DJWiA,7
|
|
29
|
+
sarapy-3.0.0.dist-info/RECORD,,
|
sarapy/utils/amg_decoder.py
DELETED
|
@@ -1,125 +0,0 @@
|
|
|
1
|
-
from dateutil import parser
|
|
2
|
-
|
|
3
|
-
|
|
4
|
-
"""
|
|
5
|
-
En 'estructura_datos' se registra cuantos bits se ocupan para cada dato.
|
|
6
|
-
Por ejemplo, los primeros 6 bits para anio, los siguientes 4 para mes y asi.
|
|
7
|
-
"""
|
|
8
|
-
|
|
9
|
-
estructura_datos = {
|
|
10
|
-
"anio": 6,
|
|
11
|
-
"mes": 4,
|
|
12
|
-
"dia": 5,
|
|
13
|
-
"hora": 5,
|
|
14
|
-
"minutos": 6,
|
|
15
|
-
"segundos": 6,
|
|
16
|
-
"operacion": 16,
|
|
17
|
-
"PT": 2,
|
|
18
|
-
"FR": 2,
|
|
19
|
-
"OR": 2,
|
|
20
|
-
"MO": 2,
|
|
21
|
-
"TLM_NPDP": 64,
|
|
22
|
-
"TLM_GPDP": 16,
|
|
23
|
-
"ID_NPDP": -1,
|
|
24
|
-
"ID_OPRR": -1,
|
|
25
|
-
"ID_GPDP": -1,
|
|
26
|
-
"ID_CDLL": -1,
|
|
27
|
-
"size_GNSS": 16,
|
|
28
|
-
"Latitud": 32,
|
|
29
|
-
"Longitud": 32,
|
|
30
|
-
"Precision": 32,
|
|
31
|
-
} # Agregar mas campos segun sea necesario
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
def extraer_bits(trama, inicio, n_bits):
|
|
35
|
-
try:
|
|
36
|
-
byte_index = inicio // 8
|
|
37
|
-
bit_offset = inicio % 8
|
|
38
|
-
|
|
39
|
-
valor = 0
|
|
40
|
-
bits_procesados = 0
|
|
41
|
-
while bits_procesados < n_bits:
|
|
42
|
-
byte_actual = trama[byte_index]
|
|
43
|
-
bits_restantes = n_bits - bits_procesados
|
|
44
|
-
bits_a_extraer = min(bits_restantes, 8 - bit_offset)
|
|
45
|
-
|
|
46
|
-
mascara = (1 << bits_a_extraer) - 1
|
|
47
|
-
bits_extraidos = (byte_actual >> (8 - bit_offset - bits_a_extraer)) & mascara
|
|
48
|
-
|
|
49
|
-
valor = (valor << bits_a_extraer) | bits_extraidos
|
|
50
|
-
|
|
51
|
-
bits_procesados += bits_a_extraer
|
|
52
|
-
byte_index += 1
|
|
53
|
-
bit_offset = 0
|
|
54
|
-
|
|
55
|
-
return valor
|
|
56
|
-
except IndexError as ex:
|
|
57
|
-
raise ex
|
|
58
|
-
except Exception as ex:
|
|
59
|
-
print(f"Error inesperado en extraer_bits: {ex}")
|
|
60
|
-
raise ex
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
def process_dynamic_id(trama, inicio):
|
|
64
|
-
# Lee el primer byte para determinar la longitud del ID
|
|
65
|
-
longitud_id_bytes = extraer_bits(trama, inicio, 8) # 8 bits = 1 byte
|
|
66
|
-
inicio += 8 # Avanza el indice de inicio 8 bits para pasar al contenido del ID
|
|
67
|
-
|
|
68
|
-
# Ahora, extrae el ID basandose en la longitud obtenida
|
|
69
|
-
id_value = extraer_bits(trama, inicio, longitud_id_bytes * 8) # Convierte la longitud a bits
|
|
70
|
-
inicio += longitud_id_bytes * 8 # Avanza el indice de inicio para pasar al final del ID
|
|
71
|
-
|
|
72
|
-
return id_value, inicio
|
|
73
|
-
|
|
74
|
-
|
|
75
|
-
def process_data(trama):
|
|
76
|
-
|
|
77
|
-
if not isinstance(trama, bytes):
|
|
78
|
-
raise ValueError("La trama debe ser un bytearray")
|
|
79
|
-
|
|
80
|
-
inicio = 0
|
|
81
|
-
resultado = {}
|
|
82
|
-
for campo, n_bits in estructura_datos.items():
|
|
83
|
-
try:
|
|
84
|
-
if n_bits == -1: # Verifica si el campo es dinamico
|
|
85
|
-
resultado[campo], inicio = process_dynamic_id(trama, inicio)
|
|
86
|
-
else:
|
|
87
|
-
if campo == "TLM_NPDP" or campo == "TLM_GPDP":
|
|
88
|
-
resultado[campo] = trama[inicio // 8: (inicio + n_bits) // 8]
|
|
89
|
-
else:
|
|
90
|
-
resultado[campo] = extraer_bits(trama, inicio, n_bits)
|
|
91
|
-
inicio += n_bits
|
|
92
|
-
if campo == "Precision":
|
|
93
|
-
# Suponiendo que size_GNSS sigue inmediatamente despues de Precision
|
|
94
|
-
raw = trama[inicio // 8: (inicio // 8 ) + resultado["size_GNSS"] - 12]
|
|
95
|
-
resultado["RAW"] = raw
|
|
96
|
-
except IndexError as ex:
|
|
97
|
-
print(f"Error al procesar campo {campo}: {ex}. Posiblemente la trama es mas corta de lo esperado.")
|
|
98
|
-
break # Salir del bucle en caso de un error de indice
|
|
99
|
-
except Exception as ex:
|
|
100
|
-
print(f"Error inesperado al procesar campo {campo}: {ex}")
|
|
101
|
-
break # Salir del bucle en caso de errores inesperados
|
|
102
|
-
|
|
103
|
-
if len(set(estructura_datos.keys()) - set(resultado.keys())) == 0:
|
|
104
|
-
|
|
105
|
-
anio = 2020 + resultado["anio"]
|
|
106
|
-
mes = str(resultado["mes"]).zfill(2)
|
|
107
|
-
dia = str(resultado["dia"]).zfill(2)
|
|
108
|
-
hora = str(resultado["hora"]).zfill(2)
|
|
109
|
-
minutos = str(resultado["minutos"]).zfill(2)
|
|
110
|
-
segundos = str(resultado["segundos"]).zfill(2)
|
|
111
|
-
resultado["date_oprc"] = parser.parse(f"{anio}-{mes}-{dia}T{hora}:{minutos}:{segundos}+00:00")
|
|
112
|
-
|
|
113
|
-
resultado["Latitud"] = (resultado["Latitud"] - 2 ** 32) / 10 ** 7
|
|
114
|
-
resultado["Longitud"] = (resultado["Longitud"] - 2 ** 32) / 10 ** 7
|
|
115
|
-
|
|
116
|
-
del resultado["anio"]
|
|
117
|
-
del resultado["mes"]
|
|
118
|
-
del resultado["dia"]
|
|
119
|
-
del resultado["hora"]
|
|
120
|
-
del resultado["minutos"]
|
|
121
|
-
del resultado["segundos"]
|
|
122
|
-
del resultado["size_GNSS"]
|
|
123
|
-
|
|
124
|
-
return resultado
|
|
125
|
-
|
sarapy/utils/amg_ppk.py
DELETED
|
@@ -1,38 +0,0 @@
|
|
|
1
|
-
from base64 import b64decode
|
|
2
|
-
|
|
3
|
-
from sarapy.utils import amg_decoder
|
|
4
|
-
|
|
5
|
-
|
|
6
|
-
def main(hash_table, ppk_data):
|
|
7
|
-
|
|
8
|
-
ppk_results = []
|
|
9
|
-
|
|
10
|
-
for hash_table_entry_values in hash_table.values():
|
|
11
|
-
|
|
12
|
-
try:
|
|
13
|
-
|
|
14
|
-
serialized_datum = hash_table_entry_values["serialized_datum"]
|
|
15
|
-
raw_datum = bytes(b64decode(serialized_datum.encode("utf-8"))) # 'trama'
|
|
16
|
-
datum = amg_decoder.process_data(raw_datum)
|
|
17
|
-
|
|
18
|
-
if datum:
|
|
19
|
-
|
|
20
|
-
longitude, latitude, accuracy = "", "", 0 # ToDo: PPK (Fernando)
|
|
21
|
-
|
|
22
|
-
if longitude:
|
|
23
|
-
datum["Longitud"] = longitude
|
|
24
|
-
if latitude:
|
|
25
|
-
datum["Latitud"] = latitude
|
|
26
|
-
if accuracy != 0:
|
|
27
|
-
datum["Precision"] = accuracy
|
|
28
|
-
|
|
29
|
-
ppk_results.append({
|
|
30
|
-
"id_db_dw": hash_table_entry_values["id_db_dw"],
|
|
31
|
-
"id_db_h": hash_table_entry_values["id_db_h"],
|
|
32
|
-
**datum
|
|
33
|
-
})
|
|
34
|
-
|
|
35
|
-
except Exception as ex:
|
|
36
|
-
print(ex)
|
|
37
|
-
|
|
38
|
-
return ppk_results
|
sarapy/utils/getRawOperations.py
DELETED
|
@@ -1,20 +0,0 @@
|
|
|
1
|
-
import pandas as pd
|
|
2
|
-
from sarapy.utils import amg_ppk
|
|
3
|
-
import os
|
|
4
|
-
def getRawOperations(data, historical_data):
|
|
5
|
-
"""
|
|
6
|
-
Args:
|
|
7
|
-
data_file: Lista de diccionarios con la data
|
|
8
|
-
historical_data_file: Lista de diccionarios con historical_data
|
|
9
|
-
|
|
10
|
-
Returns the raw operations from the database.
|
|
11
|
-
"""
|
|
12
|
-
hash_table = {}
|
|
13
|
-
for datum in data:
|
|
14
|
-
hash_table[datum["timestamp"]] = {"id_db_dw": datum["id"], "id_db_h": 0, "serialized_datum": ""}
|
|
15
|
-
for historical_datum in historical_data:
|
|
16
|
-
if historical_datum["timestamp"] in hash_table:
|
|
17
|
-
hash_table[historical_datum["timestamp"]].update({"id_db_h": historical_datum["id"], "serialized_datum": historical_datum["datum"]})
|
|
18
|
-
ppk_results = amg_ppk.main(hash_table, []) # ToDo: PPK (Fernando)
|
|
19
|
-
|
|
20
|
-
return ppk_results
|
sarapy-2.2.0.dist-info/RECORD
DELETED
|
@@ -1,29 +0,0 @@
|
|
|
1
|
-
sarapy/__init__.py,sha256=aVoywqGSscYYDycLaYJnz08dlQabl9gH0h4Q5KtHM9o,74
|
|
2
|
-
sarapy/version.py,sha256=KOlO2hui0K8-Vgk9NSIA7DmWhS5E7jpA2nk5zqFERaE,51
|
|
3
|
-
sarapy/dataProcessing/GeoProcessor.py,sha256=ARjgKTXDVdf_cFCXyFmzlnmmmay3HG3q-yeJ9QrAcQU,5919
|
|
4
|
-
sarapy/dataProcessing/OpsProcessor.py,sha256=edoZBO-loAU-uVvjkNeFiRfUGFnaz2ImMsT25DMxDGM,17349
|
|
5
|
-
sarapy/dataProcessing/TLMSensorDataProcessor.py,sha256=RuITlryuSaIWvYyJwE5wxp85HVZ6mr5kUVALikfwS4g,3603
|
|
6
|
-
sarapy/dataProcessing/TimeSeriesProcessor.py,sha256=aig3A3_SCa9FVSWxGWiapBUX7Lj9Wi1BVyZi-XXZZYQ,6414
|
|
7
|
-
sarapy/dataProcessing/__init__.py,sha256=Kqs5sFtq6RMEa3KLJFbsGRoYsIxHL1UUGMuplyCyQFk,200
|
|
8
|
-
sarapy/mlProcessors/FertilizerFMCreator.py,sha256=LNi86CI6eVuQ0_UBVJNd_-L79fcY2-zY2NCm9ypl6OM,2354
|
|
9
|
-
sarapy/mlProcessors/FertilizerTransformer.py,sha256=PefMNrsvfqqjup0lcypzZB0IKzZbvTlTI03u4ITNuUo,3003
|
|
10
|
-
sarapy/mlProcessors/PlantinClassifier.py,sha256=PoPvtrqTXCmr0cLaMNRdDzhvzUJNZhLtvZNeE0qd_0Q,7905
|
|
11
|
-
sarapy/mlProcessors/PlantinFMCreator.py,sha256=Rp6Mx_bhe0tvcktaG8vC8Dq8LPsBzKx4IjfVledvy6I,6926
|
|
12
|
-
sarapy/mlProcessors/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
13
|
-
sarapy/preprocessing/DistancesImputer.py,sha256=NvbVAh5m0yFxVgDbEFnEX7RSG13qLjO7i2gqjDAWsf4,9106
|
|
14
|
-
sarapy/preprocessing/FertilizerImputer.py,sha256=zK6ONAilwPHvj-bC7yxnQYOkDBCCkWh6__57vYK9anM,1490
|
|
15
|
-
sarapy/preprocessing/TransformInputData.py,sha256=hp2P0Jry7ORUU_N3bMlAz1sCTvD5Qwfm376O8xqwPEo,8539
|
|
16
|
-
sarapy/preprocessing/TransformToOutputData.py,sha256=2hSeFkrSt1OO_jiX4SQJtL3Dhm_9xLy7zCgkj8jo9OE,3137
|
|
17
|
-
sarapy/preprocessing/__init__.py,sha256=Wg_Csy8Xiz8BN8A4-T7iPwcL_ol5ApEx6YtybItKB8M,100
|
|
18
|
-
sarapy/stats/__init__.py,sha256=ZrLMSistwynmmx4HUcI-ePRzqQ4bjp85JT4fTmbzC-c,27
|
|
19
|
-
sarapy/stats/stats.py,sha256=raQBnn2RRtwYOuKN4Mgk6Rhk4hajx1TVcGlYnT2TMmA,11412
|
|
20
|
-
sarapy/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
21
|
-
sarapy/utils/amg_decoder.py,sha256=JZ7cbu7DlCuatuq2F7aBfUr7S7U-K5poBgxw5nY6rNI,4319
|
|
22
|
-
sarapy/utils/amg_ppk.py,sha256=c0GusnxdntU-E0JOezzbIfC7SWoJmKAbad_zYDCJ3-c,1060
|
|
23
|
-
sarapy/utils/getRawOperations.py,sha256=8aA1fIkNCnUYgiWfnFggRT_U35z432gZBrZ7seGO5w4,817
|
|
24
|
-
sarapy/utils/plotting.py,sha256=kX-eYw618urMcUBkNPviQZdBziDc_TR3GInTsO90kU4,4065
|
|
25
|
-
sarapy-2.2.0.dist-info/LICENCE,sha256=N00sU3vSQ6F5c2vML9_qP4IFTkCPFFj0YGDB2CZP-uQ,840
|
|
26
|
-
sarapy-2.2.0.dist-info/METADATA,sha256=4yYQOkyZXWyLc3UqIHtYbxMxpznXjRat6kGwHD60Jk0,5945
|
|
27
|
-
sarapy-2.2.0.dist-info/WHEEL,sha256=R0nc6qTxuoLk7ShA2_Y-UWkN8ZdfDBG2B6Eqpz2WXbs,91
|
|
28
|
-
sarapy-2.2.0.dist-info/top_level.txt,sha256=4mUGZXfX2Fw47fpY6MQkaJeuOs_8tbjLkkNp34DJWiA,7
|
|
29
|
-
sarapy-2.2.0.dist-info/RECORD,,
|
|
File without changes
|
|
File without changes
|
|
File without changes
|