tsp 1.8.1__py3-none-any.whl → 1.10.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tsp/__init__.py +11 -11
- tsp/__meta__.py +1 -1
- tsp/concatenation.py +159 -153
- tsp/core.py +1306 -1162
- tsp/data/2023-01-06_755-test-Dataset_2031-Constant_Over_Interval-Hourly-Ground_Temperature-Thermistor_Automated.timeserie.csv +4 -4
- tsp/data/2023-01-06_755-test.metadata.txt +208 -208
- tsp/data/NTGS_example_csv.csv +6 -6
- tsp/data/NTGS_example_slash_dates.csv +6 -6
- tsp/data/NTGS_gtr_example_excel.xlsx +0 -0
- tsp/data/example_geotop.csv +5240 -5240
- tsp/data/example_gtnp.csv +1298 -1298
- tsp/data/example_permos.csv +7 -7
- tsp/data/ntgs-db-multi.txt +3872 -0
- tsp/data/ntgs-db-single.txt +2251 -0
- tsp/data/test_geotop_has_space.txt +5 -5
- tsp/data/tsp_format_long.csv +10 -0
- tsp/data/tsp_format_wide_1.csv +7 -0
- tsp/data/tsp_format_wide_2.csv +7 -0
- tsp/dataloggers/AbstractReader.py +43 -43
- tsp/dataloggers/FG2.py +110 -110
- tsp/dataloggers/GP5W.py +114 -114
- tsp/dataloggers/Geoprecision.py +34 -34
- tsp/dataloggers/HOBO.py +930 -914
- tsp/dataloggers/RBRXL800.py +190 -190
- tsp/dataloggers/RBRXR420.py +371 -308
- tsp/dataloggers/Vemco.py +84 -0
- tsp/dataloggers/__init__.py +15 -15
- tsp/dataloggers/logr.py +196 -115
- tsp/dataloggers/test_files/004448.DAT +2543 -2543
- tsp/dataloggers/test_files/004531.DAT +17106 -17106
- tsp/dataloggers/test_files/004531.HEX +3587 -3587
- tsp/dataloggers/test_files/004534.HEX +3587 -3587
- tsp/dataloggers/test_files/010252.dat +1731 -1731
- tsp/dataloggers/test_files/010252.hex +1739 -1739
- tsp/dataloggers/test_files/010274.hex +1291 -1291
- tsp/dataloggers/test_files/010278.hex +3544 -3544
- tsp/dataloggers/test_files/012064.dat +1286 -1286
- tsp/dataloggers/test_files/012064.hex +1294 -1294
- tsp/dataloggers/test_files/012064_modified_start.hex +1294 -0
- tsp/dataloggers/test_files/012081.hex +3532 -3532
- tsp/dataloggers/test_files/013138_recovery_stamp.hex +1123 -0
- tsp/dataloggers/test_files/014037-2007.hex +95 -0
- tsp/dataloggers/test_files/019360_20160918_1146_SlumpIslandTopofHill.hex +11253 -0
- tsp/dataloggers/test_files/019360_20160918_1146_SlumpIslandTopofHill.xls +0 -0
- tsp/dataloggers/test_files/07B1592.DAT +1483 -1483
- tsp/dataloggers/test_files/07B1592.HEX +1806 -1806
- tsp/dataloggers/test_files/07B4450.DAT +2234 -2234
- tsp/dataloggers/test_files/07B4450.HEX +2559 -2559
- tsp/dataloggers/test_files/2022018_2025-09-18T22-16-16.txt +36 -0
- tsp/dataloggers/test_files/2022018_2025-09-18T22-16-16_raw.csv +2074 -0
- tsp/dataloggers/test_files/2022018_2025-09-18T22-16-16_temp.csv +2074 -0
- tsp/dataloggers/test_files/2025004_2025-12-02T17-07-28_cfg.txt +30 -0
- tsp/dataloggers/test_files/2025004_2025-12-02T17-07-28_raw.csv +35 -0
- tsp/dataloggers/test_files/2025004_2025-12-02T17-07-28_temp.csv +35 -0
- tsp/dataloggers/test_files/204087.xlsx +0 -0
- tsp/dataloggers/test_files/Asc-1455As02.000 +2982 -0
- tsp/dataloggers/test_files/Asc-1456As02.000 +2992 -0
- tsp/dataloggers/test_files/Asc-1457As02.000 +2917 -0
- tsp/dataloggers/test_files/BGC_BH15_019362_20140610_1253.hex +1729 -0
- tsp/dataloggers/test_files/Bin2944.csv +759 -0
- tsp/dataloggers/test_files/Bin5494.csv +2972 -0
- tsp/dataloggers/test_files/Bin6786.csv +272 -0
- tsp/dataloggers/test_files/FG2_399.csv +9881 -9881
- tsp/dataloggers/test_files/GP5W.csv +1121 -1121
- tsp/dataloggers/test_files/GP5W_260.csv +1884 -1884
- tsp/dataloggers/test_files/GP5W_270.csv +2210 -2210
- tsp/dataloggers/test_files/H08-030-08_HOBOware.csv +998 -998
- tsp/dataloggers/test_files/Minilog-II-T_350763_20190711_1.csv +2075 -0
- tsp/dataloggers/test_files/Minilog-II-T_350769_20190921_1.csv +6384 -0
- tsp/dataloggers/test_files/Minilog-II-T_354284_20190921_1.csv +4712 -0
- tsp/dataloggers/test_files/Minilog-T_7943_20140920_1.csv +5826 -0
- tsp/dataloggers/test_files/Minilog-T_8979_20140806_1.csv +2954 -0
- tsp/dataloggers/test_files/Minilog-T_975_20110824_1.csv +4343 -0
- tsp/dataloggers/test_files/RBR_01.dat +1046 -1046
- tsp/dataloggers/test_files/RBR_02.dat +2426 -2426
- tsp/dataloggers/test_files/RI03b_062831_20240905_1801.rsk +0 -0
- tsp/dataloggers/test_files/RI03b_062831_20240905_1801.xlsx +0 -0
- tsp/dataloggers/test_files/RSTDT2055.csv +2152 -2152
- tsp/dataloggers/test_files/U23-001_HOBOware.csv +1001 -1001
- tsp/dataloggers/test_files/hobo-negative-2.txt +6396 -6396
- tsp/dataloggers/test_files/hobo-negative-3.txt +5593 -5593
- tsp/dataloggers/test_files/hobo-positive-number-1.txt +1000 -1000
- tsp/dataloggers/test_files/hobo-positive-number-2.csv +1003 -1003
- tsp/dataloggers/test_files/hobo-positive-number-3.csv +1133 -1133
- tsp/dataloggers/test_files/hobo-positive-number-4.csv +1209 -1209
- tsp/dataloggers/test_files/hobo2.csv +8702 -8702
- tsp/dataloggers/test_files/hobo_1_AB.csv +21732 -21732
- tsp/dataloggers/test_files/hobo_1_AB_Details.txt +133 -133
- tsp/dataloggers/test_files/hobo_1_AB_classic.csv +4373 -4373
- tsp/dataloggers/test_files/hobo_1_AB_defaults.csv +21732 -21732
- tsp/dataloggers/test_files/hobo_1_AB_minimal.txt +1358 -1358
- tsp/dataloggers/test_files/hobo_1_AB_var2.csv +3189 -3189
- tsp/dataloggers/test_files/hobo_1_AB_var3.csv +2458 -2458
- tsp/dataloggers/test_files/logR_ULogC16-32_1.csv +106 -106
- tsp/dataloggers/test_files/logR_ULogC16-32_2.csv +100 -100
- tsp/dataloggers/test_files/mon_3_Ta_2010-08-18_2013-02-08.txt +21724 -21724
- tsp/dataloggers/test_files/rbr_001.dat +1133 -1133
- tsp/dataloggers/test_files/rbr_001.hex +1139 -1139
- tsp/dataloggers/test_files/rbr_001_no_comment.dat +1132 -1132
- tsp/dataloggers/test_files/rbr_001_no_comment.hex +1138 -1138
- tsp/dataloggers/test_files/rbr_002.dat +1179 -1179
- tsp/dataloggers/test_files/rbr_002.hex +1185 -1185
- tsp/dataloggers/test_files/rbr_003.hex +1292 -1292
- tsp/dataloggers/test_files/rbr_xl_001.DAT +1105 -1105
- tsp/dataloggers/test_files/rbr_xl_002.DAT +1126 -1126
- tsp/dataloggers/test_files/rbr_xl_003.DAT +4622 -4622
- tsp/dataloggers/test_files/rbr_xl_003.HEX +3587 -3587
- tsp/gtnp.py +148 -148
- tsp/labels.py +3 -3
- tsp/misc.py +90 -90
- tsp/physics.py +101 -101
- tsp/plots/static.py +388 -374
- tsp/readers.py +829 -548
- tsp/standardization/__init__.py +0 -0
- tsp/standardization/metadata.py +95 -0
- tsp/standardization/metadata_ref.py +0 -0
- tsp/standardization/validator.py +535 -0
- tsp/time.py +45 -45
- tsp/tspwarnings.py +27 -15
- tsp/utils.py +131 -101
- tsp/version.py +1 -1
- {tsp-1.8.1.dist-info → tsp-1.10.2.dist-info}/METADATA +95 -86
- tsp-1.10.2.dist-info/RECORD +132 -0
- {tsp-1.8.1.dist-info → tsp-1.10.2.dist-info}/licenses/LICENSE +674 -674
- {tsp-1.8.1.dist-info → tsp-1.10.2.dist-info}/top_level.txt +1 -0
- tsp-1.8.1.dist-info/RECORD +0 -94
- {tsp-1.8.1.dist-info → tsp-1.10.2.dist-info}/WHEEL +0 -0
tsp/dataloggers/RBRXL800.py
CHANGED
|
@@ -1,190 +1,190 @@
|
|
|
1
|
-
import pathlib
|
|
2
|
-
import warnings
|
|
3
|
-
import numpy as np
|
|
4
|
-
import pandas as pd
|
|
5
|
-
import datetime as dt
|
|
6
|
-
from .AbstractReader import AbstractReader
|
|
7
|
-
|
|
8
|
-
|
|
9
|
-
class RBRXL800(AbstractReader):
|
|
10
|
-
|
|
11
|
-
def read(self, file_path: str) -> "pd.DataFrame":
|
|
12
|
-
"""
|
|
13
|
-
|
|
14
|
-
Parameters
|
|
15
|
-
----------
|
|
16
|
-
file
|
|
17
|
-
|
|
18
|
-
Returns
|
|
19
|
-
-------
|
|
20
|
-
|
|
21
|
-
"""
|
|
22
|
-
file_extention = pathlib.Path(file_path).suffix.lower()
|
|
23
|
-
if file_extention not in [".dat", ".hex"]:
|
|
24
|
-
raise IOError("Unrecognised file. File is not a .dat or .hex")
|
|
25
|
-
|
|
26
|
-
with open(file_path, "r") as f:
|
|
27
|
-
header_lines = [next(f) for i in range(18)]
|
|
28
|
-
self._parse_meta(header_lines)
|
|
29
|
-
|
|
30
|
-
data_lines = f.readlines()
|
|
31
|
-
if file_extention == ".dat":
|
|
32
|
-
if data_lines[0] == "\n" or len(data_lines[0].split()) == self.META["
|
|
33
|
-
self._read_daily_dat_format(data_lines)
|
|
34
|
-
else:
|
|
35
|
-
if len(data_lines[0].split()) == 1 + self.META["
|
|
36
|
-
self._read_standard_dat_format(data_lines, True)
|
|
37
|
-
elif len(data_lines[0].split()) == self.META["
|
|
38
|
-
self._read_standard_dat_format(data_lines, False)
|
|
39
|
-
else:
|
|
40
|
-
raise RuntimeError("Error: Number of column names and number of columns do not match any"
|
|
41
|
-
"expected pattern.")
|
|
42
|
-
|
|
43
|
-
elif file_extention == ".hex":
|
|
44
|
-
self.META["
|
|
45
|
-
data_lines = data_lines[1:]
|
|
46
|
-
self._read_standard_hex_format(data_lines)
|
|
47
|
-
|
|
48
|
-
if len(self.DATA.index) != self.META["
|
|
49
|
-
warnings.warn(f"{file_path} Mismatch between number of samples in specified header "
|
|
50
|
-
f"({self.META['
|
|
51
|
-
"data may be missing")
|
|
52
|
-
return self.DATA
|
|
53
|
-
|
|
54
|
-
def _parse_meta(self, header_lines: list):
|
|
55
|
-
self.META["
|
|
56
|
-
self.META["
|
|
57
|
-
sample_interval = dt.datetime.strptime(header_lines[5].split()[-1], "%H:%M:%S")
|
|
58
|
-
self.META["
|
|
59
|
-
seconds=sample_interval.second)
|
|
60
|
-
# try:
|
|
61
|
-
self.META["
|
|
62
|
-
"""
|
|
63
|
-
except ValueError:
|
|
64
|
-
date = header_lines[3].split()[-2]
|
|
65
|
-
if "00" in date.split("/"):
|
|
66
|
-
warnings.warn("Invalid logging start date given in header. Logger may have experienced power issues and"
|
|
67
|
-
"data may be corrupt")"""
|
|
68
|
-
|
|
69
|
-
line_7_info = header_lines[6].split(",")
|
|
70
|
-
self.META["
|
|
71
|
-
self.META["
|
|
72
|
-
self.META["precision"] = int(header_lines[9].split("%")[1][-2])
|
|
73
|
-
|
|
74
|
-
self.META["
|
|
75
|
-
calibration_start_line = 10
|
|
76
|
-
for i in range(self.META["
|
|
77
|
-
self.META["
|
|
78
|
-
line_num = calibration_start_line + i
|
|
79
|
-
raw_calibration = header_lines[line_num].split()
|
|
80
|
-
if raw_calibration[1] != "2":
|
|
81
|
-
raise ValueError(f"Calibration equation #{raw_calibration[1]} currently unsupported.")
|
|
82
|
-
self.META["
|
|
83
|
-
self.META["
|
|
84
|
-
self.META["
|
|
85
|
-
if raw_calibration[5] == "0":
|
|
86
|
-
self.META["
|
|
87
|
-
else:
|
|
88
|
-
self.META["
|
|
89
|
-
self.META['raw'] = "".join(header_lines)
|
|
90
|
-
return
|
|
91
|
-
|
|
92
|
-
def _read_daily_dat_format(self, raw_data: list):
|
|
93
|
-
"""
|
|
94
|
-
|
|
95
|
-
Parameters
|
|
96
|
-
----------
|
|
97
|
-
raw_data
|
|
98
|
-
|
|
99
|
-
Returns
|
|
100
|
-
-------
|
|
101
|
-
|
|
102
|
-
"""
|
|
103
|
-
self.DATA = pd.DataFrame(columns=[f"
|
|
104
|
-
for line in raw_data:
|
|
105
|
-
if line != "\n":
|
|
106
|
-
if len(line) == 20 or len(line.split()) == self.META["
|
|
107
|
-
date_stamp = dt.datetime.strptime(" ".join(line.split()[0:2]), "%Y/%m/%d %H:%M:%S")
|
|
108
|
-
interval_num = 0
|
|
109
|
-
elif len(line.split()) == self.META["
|
|
110
|
-
self.DATA.loc[date_stamp + self.META["
|
|
111
|
-
interval_num += 1
|
|
112
|
-
else:
|
|
113
|
-
self.DATA.loc[date_stamp + self.META["
|
|
114
|
-
interval_num += 1
|
|
115
|
-
for col in self.DATA:
|
|
116
|
-
self.DATA[col] = pd.to_numeric(self.DATA[col], errors='coerce')
|
|
117
|
-
self.DATA.reset_index(inplace=True)
|
|
118
|
-
self.DATA.rename(columns={"index": "TIME"}, inplace=True)
|
|
119
|
-
return
|
|
120
|
-
|
|
121
|
-
def _read_standard_hex_format(self, raw_data: list):
|
|
122
|
-
byte_list = []
|
|
123
|
-
for line in raw_data:
|
|
124
|
-
eight_bytes = [line[i: i + 4] for i in range(0, len(line), 4)][:-1]
|
|
125
|
-
for byte in eight_bytes:
|
|
126
|
-
byte_list.append(byte)
|
|
127
|
-
byte_num = 0
|
|
128
|
-
self.DATA = pd.DataFrame(columns=[f"
|
|
129
|
-
line_num = 0
|
|
130
|
-
prev_line_day = 0
|
|
131
|
-
for line in range(self.META["
|
|
132
|
-
line_time = self.META["
|
|
133
|
-
if line_time.day != prev_line_day:
|
|
134
|
-
byte_num += 7
|
|
135
|
-
prev_line_day = line_time.day
|
|
136
|
-
line_bytes = byte_list[byte_num: byte_num + 8]
|
|
137
|
-
line_temps = []
|
|
138
|
-
for channel in range(len(line_bytes)):
|
|
139
|
-
hex_val = line_bytes[channel]
|
|
140
|
-
first_digit = hex_val[0]
|
|
141
|
-
if first_digit == "0":
|
|
142
|
-
data_val = -int(hex_val[1:], 16)
|
|
143
|
-
if first_digit == "2":
|
|
144
|
-
data_val = int(hex_val[1:], 16)
|
|
145
|
-
elif first_digit in ["1", "3"]:
|
|
146
|
-
data_val = np.nan
|
|
147
|
-
if not np.isnan(data_val) and data_val > 0:
|
|
148
|
-
a0 = self.META["
|
|
149
|
-
a1 = self.META["
|
|
150
|
-
a2 = self.META["
|
|
151
|
-
a3 = self.META["
|
|
152
|
-
y = a2 * ((2048 * (a3 / data_val)) - 1)
|
|
153
|
-
temp = (a1 / ((a1 / 273.15) - np.log(a0 / y))) - 273.15
|
|
154
|
-
line_temps.append(round(temp, self.META["precision"]))
|
|
155
|
-
else:
|
|
156
|
-
line_temps.append(np.nan)
|
|
157
|
-
self.DATA.loc[line_time] = line_temps
|
|
158
|
-
byte_num += 8
|
|
159
|
-
line_num += 1
|
|
160
|
-
for col in self.DATA:
|
|
161
|
-
self.DATA[col] = pd.to_numeric(self.DATA[col], errors='coerce')
|
|
162
|
-
self.DATA.reset_index(inplace=True)
|
|
163
|
-
self.DATA.rename(columns={"index": "TIME"}, inplace=True)
|
|
164
|
-
return
|
|
165
|
-
|
|
166
|
-
def _read_standard_dat_format(self, raw_data: list, line_numbers=False):
|
|
167
|
-
"""
|
|
168
|
-
|
|
169
|
-
Parameters
|
|
170
|
-
----------
|
|
171
|
-
raw_data
|
|
172
|
-
line_numbers
|
|
173
|
-
|
|
174
|
-
Returns
|
|
175
|
-
-------
|
|
176
|
-
|
|
177
|
-
"""
|
|
178
|
-
self.DATA = pd.DataFrame(columns=[f"
|
|
179
|
-
line_num = 0
|
|
180
|
-
for line in raw_data:
|
|
181
|
-
line_data = line.split()
|
|
182
|
-
if line_numbers:
|
|
183
|
-
line_data = line_data[1:]
|
|
184
|
-
self.DATA.loc[self.META["
|
|
185
|
-
line_num += 1
|
|
186
|
-
for col in self.DATA:
|
|
187
|
-
self.DATA[col] = pd.to_numeric(self.DATA[col], errors='coerce')
|
|
188
|
-
self.DATA.reset_index(inplace=True)
|
|
189
|
-
self.DATA.rename(columns={"index": "TIME"}, inplace=True)
|
|
190
|
-
return
|
|
1
|
+
import pathlib
|
|
2
|
+
import warnings
|
|
3
|
+
import numpy as np
|
|
4
|
+
import pandas as pd
|
|
5
|
+
import datetime as dt
|
|
6
|
+
from .AbstractReader import AbstractReader
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class RBRXL800(AbstractReader):
    """Reader for RBR XL-800 series datalogger exports.

    Supports the plain-text ``.dat`` format (daily-block and standard
    one-sample-per-line layouts) and the raw ``.hex`` format, converting
    hex words to temperatures with the per-channel calibration
    coefficients found in the 18-line file header.
    """

    def read(self, file_path: str) -> "pd.DataFrame":
        """Read an RBR XL-800 ``.dat`` or ``.hex`` file.

        Parameters
        ----------
        file_path : str
            Path to the exported logger file.

        Returns
        -------
        pd.DataFrame
            One row per sample, with a ``TIME`` column and one
            ``channel_N`` column per logger channel (stored in
            ``self.DATA``).

        Raises
        ------
        IOError
            If the file extension is neither ``.dat`` nor ``.hex``.
        RuntimeError
            If a ``.dat`` file's column count matches no known layout.
        """
        file_extention = pathlib.Path(file_path).suffix.lower()
        if file_extention not in [".dat", ".hex"]:
            raise IOError("Unrecognised file. File is not a .dat or .hex")

        with open(file_path, "r") as f:
            header_lines = [next(f) for i in range(18)]
            self._parse_meta(header_lines)
            data_lines = f.readlines()

        if file_extention == ".dat":
            # The token count of the first data line distinguishes the layouts.
            if data_lines[0] == "\n" or len(data_lines[0].split()) == self.META["num_channels"] + 2:
                self._read_daily_dat_format(data_lines)
            elif len(data_lines[0].split()) == 1 + self.META["num_channels"]:
                self._read_standard_dat_format(data_lines, True)
            elif len(data_lines[0].split()) == self.META["num_channels"]:
                self._read_standard_dat_format(data_lines, False)
            else:
                # Fix: the original two-part message concatenated to
                # "...match anyexpected pattern." (missing space).
                raise RuntimeError("Error: Number of column names and number of columns do not match any "
                                   "expected pattern.")
        elif file_extention == ".hex":
            # First data line of a .hex file reports the byte count.
            self.META["num_bytes"] = int(data_lines[0].split()[-1])
            data_lines = data_lines[1:]
            self._read_standard_hex_format(data_lines)

        if len(self.DATA.index) != self.META["num_samples"]:
            warnings.warn(f"{file_path} Mismatch between number of samples in specified header "
                          f"({self.META['num_samples']}) and number of samples read {len(self.DATA.index)}. Some "
                          "data may be missing")
        return self.DATA

    def _parse_meta(self, header_lines: list):
        """Extract logger metadata and calibration data from the 18-line header.

        Populates ``self.META`` with the logger model, serial number,
        sampling interval, logging start time, channel and sample counts,
        reporting precision, per-channel calibration parameters, and the
        raw header text.

        Raises
        ------
        ValueError
            If a channel uses a calibration equation other than #2.
        """
        self.META["logger_model"] = header_lines[0].split()[1]
        self.META["logger_sn"] = header_lines[0].split()[3]
        sample_interval = dt.datetime.strptime(header_lines[5].split()[-1], "%H:%M:%S")
        self.META["sample_interval"] = dt.timedelta(hours=sample_interval.hour, minutes=sample_interval.minute,
                                                    seconds=sample_interval.second)
        # NOTE(review): an earlier revision wrapped the start-date parse in a
        # try/except that warned about invalid ("00") dates from loggers with
        # power issues; that guard was left disabled and is removed here.
        self.META["logging_start"] = dt.datetime.strptime(" ".join(header_lines[3].split()[-2:]), "%y/%m/%d %H:%M:%S")

        line_7_info = header_lines[6].split(",")
        self.META["num_channels"] = int(line_7_info[0].split()[-1])
        self.META["num_samples"] = int(line_7_info[1].split()[-1])
        self.META["precision"] = int(header_lines[9].split("%")[1][-2])

        self.META["calibration_parameters"] = {}
        calibration_start_line = 10  # calibration lines follow header line 10
        for i in range(self.META["num_channels"]):
            channel = self.META["calibration_parameters"][f"channel_{i + 1}"] = {}
            raw_calibration = header_lines[calibration_start_line + i].split()
            if raw_calibration[1] != "2":
                raise ValueError(f"Calibration equation #{raw_calibration[1]} currently unsupported.")
            channel["a0"] = float(raw_calibration[2])
            channel["a1"] = float(raw_calibration[3])
            channel["a2"] = float(raw_calibration[4])
            if raw_calibration[5] == "0":
                channel["a3"] = 1
            else:
                # NOTE(review): a3 reuses field 2 (the same field as a0);
                # confirm this is intentional and not a typo for field 5.
                channel["a3"] = float(raw_calibration[2])
        self.META['raw'] = "".join(header_lines)
        return

    def _read_daily_dat_format(self, raw_data: list):
        """Parse the "daily" ``.dat`` layout into ``self.DATA``.

        Each day begins with a date-stamped line; subsequent lines hold
        one sample each, whose time is the day stamp plus a multiple of
        the sampling interval.

        Parameters
        ----------
        raw_data : list
            Data lines following the header.
        """
        self.DATA = pd.DataFrame(columns=[f"channel_{i + 1}" for i in range(self.META["num_channels"])])
        for line in raw_data:
            if line != "\n":
                fields = line.split()
                if len(line) == 20 or len(fields) == self.META["num_channels"] + 2:
                    # Day-stamp line: reset the interval counter.
                    # NOTE(review): a data line appearing before any stamp
                    # would raise NameError on date_stamp below — presumably
                    # every file starts with a stamp; confirm.
                    date_stamp = dt.datetime.strptime(" ".join(fields[0:2]), "%Y/%m/%d %H:%M:%S")
                    interval_num = 0
                elif len(fields) == self.META["num_channels"] + 1:
                    # Data line with a leading record number to strip.
                    self.DATA.loc[date_stamp + self.META["sample_interval"] * interval_num] = fields[1:]
                    interval_num += 1
                else:
                    self.DATA.loc[date_stamp + self.META["sample_interval"] * interval_num] = fields
                    interval_num += 1
        for col in self.DATA:
            self.DATA[col] = pd.to_numeric(self.DATA[col], errors='coerce')
        self.DATA.reset_index(inplace=True)
        self.DATA.rename(columns={"index": "TIME"}, inplace=True)
        return

    def _read_standard_hex_format(self, raw_data: list):
        """Decode the raw ``.hex`` layout into temperatures in ``self.DATA``.

        Each sample is one 4-hex-digit word per channel. The leading digit
        encodes sign/validity ("0" negative, "2" positive, "1"/"3"
        invalid); the remaining digits are raw counts converted to
        degrees with the header's calibration coefficients.

        Parameters
        ----------
        raw_data : list
            Hex data lines following the header and byte-count line.
        """
        # Flatten the file into a list of 4-character hex words, dropping
        # the trailing (partial) word of each line.
        byte_list = []
        for line in raw_data:
            eight_bytes = [line[i: i + 4] for i in range(0, len(line), 4)][:-1]
            for byte in eight_bytes:
                byte_list.append(byte)
        byte_num = 0
        self.DATA = pd.DataFrame(columns=[f"channel_{i + 1}" for i in range(self.META["num_channels"])])
        prev_line_day = 0
        for line_num in range(self.META["num_samples"]):
            line_time = self.META["logging_start"] + self.META["sample_interval"] * line_num
            if line_time.day != prev_line_day:
                # Each new day is preceded by a 7-word date stamp; skip it.
                byte_num += 7
                prev_line_day = line_time.day
            line_bytes = byte_list[byte_num: byte_num + 8]
            line_temps = []
            for channel in range(len(line_bytes)):
                hex_val = line_bytes[channel]
                first_digit = hex_val[0]
                if first_digit == "0":
                    data_val = -int(hex_val[1:], 16)
                elif first_digit == "2":
                    data_val = int(hex_val[1:], 16)
                elif first_digit in ["1", "3"]:
                    data_val = np.nan
                else:
                    # Fix: an unexpected leading digit previously left
                    # data_val unbound (NameError); treat it as missing.
                    data_val = np.nan
                if not np.isnan(data_val) and data_val > 0:
                    a0 = self.META["calibration_parameters"][f"channel_{channel + 1}"]["a0"]
                    a1 = self.META["calibration_parameters"][f"channel_{channel + 1}"]["a1"]
                    a2 = self.META["calibration_parameters"][f"channel_{channel + 1}"]["a2"]
                    a3 = self.META["calibration_parameters"][f"channel_{channel + 1}"]["a3"]
                    y = a2 * ((2048 * (a3 / data_val)) - 1)
                    temp = (a1 / ((a1 / 273.15) - np.log(a0 / y))) - 273.15
                    line_temps.append(round(temp, self.META["precision"]))
                else:
                    line_temps.append(np.nan)
            self.DATA.loc[line_time] = line_temps
            byte_num += 8
        for col in self.DATA:
            self.DATA[col] = pd.to_numeric(self.DATA[col], errors='coerce')
        self.DATA.reset_index(inplace=True)
        self.DATA.rename(columns={"index": "TIME"}, inplace=True)
        return

    def _read_standard_dat_format(self, raw_data: list, line_numbers=False):
        """Parse the standard ``.dat`` layout (one sample per line).

        Sample times are the logging start plus a multiple of the
        sampling interval.

        Parameters
        ----------
        raw_data : list
            Data lines following the header.
        line_numbers : bool
            True when each line begins with a record number that must be
            stripped before the channel values.
        """
        self.DATA = pd.DataFrame(columns=[f"channel_{i + 1}" for i in range(self.META["num_channels"])])
        for line_num, line in enumerate(raw_data):
            line_data = line.split()
            if line_numbers:
                line_data = line_data[1:]
            self.DATA.loc[self.META["logging_start"] + self.META["sample_interval"] * line_num] = line_data
        for col in self.DATA:
            self.DATA[col] = pd.to_numeric(self.DATA[col], errors='coerce')
        self.DATA.reset_index(inplace=True)
        self.DATA.rename(columns={"index": "TIME"}, inplace=True)
        return