tsp 1.7.1__py3-none-any.whl → 1.7.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of tsp might be problematic. Click here for more details.
- tsp/__init__.py +11 -11
- tsp/__meta__.py +1 -1
- tsp/core.py +1035 -1010
- tsp/data/2023-01-06_755-test-Dataset_2031-Constant_Over_Interval-Hourly-Ground_Temperature-Thermistor_Automated.timeserie.csv +4 -4
- tsp/data/2023-01-06_755-test.metadata.txt +208 -208
- tsp/data/NTGS_example_csv.csv +6 -0
- tsp/data/NTGS_example_slash_dates.csv +6 -0
- tsp/data/example_geotop.csv +5240 -5240
- tsp/data/example_gtnp.csv +1298 -1298
- tsp/data/example_permos.csv +8 -0
- tsp/data/test_geotop_has_space.txt +5 -0
- tsp/dataloggers/AbstractReader.py +43 -43
- tsp/dataloggers/FG2.py +110 -110
- tsp/dataloggers/GP5W.py +114 -114
- tsp/dataloggers/Geoprecision.py +34 -34
- tsp/dataloggers/HOBO.py +914 -914
- tsp/dataloggers/RBRXL800.py +190 -190
- tsp/dataloggers/RBRXR420.py +308 -307
- tsp/dataloggers/__init__.py +15 -15
- tsp/dataloggers/logr.py +115 -115
- tsp/dataloggers/test_files/004448.DAT +2543 -2543
- tsp/dataloggers/test_files/004531.DAT +17106 -17106
- tsp/dataloggers/test_files/004531.HEX +3587 -3587
- tsp/dataloggers/test_files/004534.HEX +3587 -3587
- tsp/dataloggers/test_files/010252.dat +1731 -1731
- tsp/dataloggers/test_files/010252.hex +1739 -1739
- tsp/dataloggers/test_files/010274.hex +1291 -1291
- tsp/dataloggers/test_files/010278.hex +3544 -3544
- tsp/dataloggers/test_files/012064.dat +1286 -1286
- tsp/dataloggers/test_files/012064.hex +1294 -1294
- tsp/dataloggers/test_files/012081.hex +3532 -3532
- tsp/dataloggers/test_files/07B1592.DAT +1483 -1483
- tsp/dataloggers/test_files/07B1592.HEX +1806 -1806
- tsp/dataloggers/test_files/07B4450.DAT +2234 -2234
- tsp/dataloggers/test_files/07B4450.HEX +2559 -2559
- tsp/dataloggers/test_files/CSc_CR1000_1.dat +295 -0
- tsp/dataloggers/test_files/FG2_399.csv +9881 -9881
- tsp/dataloggers/test_files/GP5W.csv +1121 -1121
- tsp/dataloggers/test_files/GP5W_260.csv +1884 -1884
- tsp/dataloggers/test_files/GP5W_270.csv +2210 -2210
- tsp/dataloggers/test_files/H08-030-08_HOBOware.csv +998 -998
- tsp/dataloggers/test_files/RBR_01.dat +1046 -1046
- tsp/dataloggers/test_files/RBR_02.dat +2426 -2426
- tsp/dataloggers/test_files/RSTDT2055.csv +2152 -2152
- tsp/dataloggers/test_files/U23-001_HOBOware.csv +1001 -1001
- tsp/dataloggers/test_files/hobo-negative-2.txt +6396 -6396
- tsp/dataloggers/test_files/hobo-negative-3.txt +5593 -5593
- tsp/dataloggers/test_files/hobo-positive-number-1.txt +1000 -1000
- tsp/dataloggers/test_files/hobo-positive-number-2.csv +1003 -1003
- tsp/dataloggers/test_files/hobo-positive-number-3.csv +1133 -1133
- tsp/dataloggers/test_files/hobo-positive-number-4.csv +1209 -1209
- tsp/dataloggers/test_files/hobo2.csv +8702 -8702
- tsp/dataloggers/test_files/hobo_1_AB.csv +21732 -21732
- tsp/dataloggers/test_files/hobo_1_AB_Details.txt +133 -133
- tsp/dataloggers/test_files/hobo_1_AB_classic.csv +4373 -4373
- tsp/dataloggers/test_files/hobo_1_AB_defaults.csv +21732 -21732
- tsp/dataloggers/test_files/hobo_1_AB_minimal.txt +1358 -1358
- tsp/dataloggers/test_files/hobo_1_AB_var2.csv +3189 -3189
- tsp/dataloggers/test_files/hobo_1_AB_var3.csv +2458 -2458
- tsp/dataloggers/test_files/logR_ULogC16-32_1.csv +106 -106
- tsp/dataloggers/test_files/logR_ULogC16-32_2.csv +100 -100
- tsp/dataloggers/test_files/mon_3_Ta_2010-08-18_2013-02-08.txt +21724 -21724
- tsp/dataloggers/test_files/rbr_001.dat +1133 -1133
- tsp/dataloggers/test_files/rbr_001.hex +1139 -1139
- tsp/dataloggers/test_files/rbr_001_no_comment.dat +1132 -1132
- tsp/dataloggers/test_files/rbr_001_no_comment.hex +1138 -1138
- tsp/dataloggers/test_files/rbr_002.dat +1179 -1179
- tsp/dataloggers/test_files/rbr_002.hex +1185 -1185
- tsp/dataloggers/test_files/rbr_003.hex +1292 -1292
- tsp/dataloggers/test_files/rbr_003.xls +0 -0
- tsp/dataloggers/test_files/rbr_xl_001.DAT +1105 -1105
- tsp/dataloggers/test_files/rbr_xl_002.DAT +1126 -1126
- tsp/dataloggers/test_files/rbr_xl_003.DAT +4622 -4622
- tsp/dataloggers/test_files/rbr_xl_003.HEX +3587 -3587
- tsp/gtnp.py +148 -141
- tsp/labels.py +3 -3
- tsp/misc.py +90 -90
- tsp/physics.py +101 -101
- tsp/plots/static.py +374 -305
- tsp/readers.py +548 -536
- tsp/scratch.py +6 -0
- tsp/time.py +45 -45
- tsp/tspwarnings.py +15 -0
- tsp/utils.py +101 -101
- tsp/version.py +1 -1
- {tsp-1.7.1.dist-info → tsp-1.7.7.dist-info}/LICENSE +674 -674
- {tsp-1.7.1.dist-info → tsp-1.7.7.dist-info}/METADATA +10 -6
- tsp-1.7.7.dist-info/RECORD +95 -0
- {tsp-1.7.1.dist-info → tsp-1.7.7.dist-info}/WHEEL +5 -5
- tsp-1.7.1.dist-info/RECORD +0 -88
- {tsp-1.7.1.dist-info → tsp-1.7.7.dist-info}/top_level.txt +0 -0
tsp/dataloggers/RBRXL800.py
CHANGED
|
@@ -1,190 +1,190 @@
|
|
|
1
|
-
import pathlib
|
|
2
|
-
import warnings
|
|
3
|
-
import numpy as np
|
|
4
|
-
import pandas as pd
|
|
5
|
-
import datetime as dt
|
|
6
|
-
from .AbstractReader import AbstractReader
|
|
7
|
-
|
|
8
|
-
|
|
9
|
-
class RBRXL800(AbstractReader):
    # Reader for RBR XL-800 data logger export files (.dat or .hex).

    def read(self, file_path: str) -> "pd.DataFrame":
        """Read an RBR XL-800 export file and return it as a DataFrame.

        Parameters
        ----------
        file
            Path to a ``.dat`` or ``.hex`` file exported from the logger.

        Returns
        -------
        pd.DataFrame
            One "channel N" column per logger channel plus a "TIME" column
            (stored on ``self.DATA``).
        """
        file_extention = pathlib.Path(file_path).suffix.lower()
        if file_extention not in [".dat", ".hex"]:
            raise IOError("Unrecognised file. File is not a .dat or .hex")

        with open(file_path, "r") as f:
            # The first 18 lines are the fixed-size instrument header.
            header_lines = [next(f) for i in range(18)]
            self._parse_meta(header_lines)

            data_lines = f.readlines()
            if file_extention == ".dat":
                # A blank first line, or a first row holding date + time plus
                # all channel values, marks the "daily" layout (one date stamp
                # per block of sample rows).
                if data_lines[0] == "\n" or len(data_lines[0].split()) == self.META["num channels"] + 2:
                    self._read_daily_dat_format(data_lines)
                else:
                    # Standard layout: rows may carry a leading line number.
                    if len(data_lines[0].split()) == 1 + self.META["num channels"]:
                        self._read_standard_dat_format(data_lines, True)
                    elif len(data_lines[0].split()) == self.META["num channels"]:
                        self._read_standard_dat_format(data_lines, False)
                    else:
                        raise RuntimeError("Error: Number of column names and number of columns do not match any"
                                           "expected pattern.")

            elif file_extention == ".hex":
                # The first payload line ends with the total byte count.
                self.META["num bytes"] = int(data_lines[0].split()[-1])
                data_lines = data_lines[1:]
                self._read_standard_hex_format(data_lines)

        # Sanity check: compare rows read against the header's sample count.
        if len(self.DATA.index) != self.META["num samples"]:
            warnings.warn(f"{file_path} Mismatch between number of samples in specified header "
                          f"({self.META['num samples']}) and number of samples read {len(self.DATA.index)}. Some "
                          "data may be missing")
        return self.DATA

    def _parse_meta(self, header_lines: list):
        """Populate ``self.META`` from the 18-line instrument header.

        Parameters
        ----------
        header_lines : list
            The first 18 lines of the export file, in file order.
        """
        self.META["logger model"] = header_lines[0].split()[1]
        self.META["logger SN"] = header_lines[0].split()[3]
        # Line 6 ends with the sample interval formatted as HH:MM:SS.
        sample_interval = dt.datetime.strptime(header_lines[5].split()[-1], "%H:%M:%S")
        self.META["sample interval"] = dt.timedelta(hours=sample_interval.hour, minutes=sample_interval.minute,
                                                    seconds=sample_interval.second)
        # try:
        # Line 4 ends with "yy/mm/dd HH:MM:SS"; the disabled handler below
        # (kept as a string literal) was meant to catch corrupt start dates.
        self.META["logging start"] = dt.datetime.strptime(" ".join(header_lines[3].split()[-2:]), "%y/%m/%d %H:%M:%S")
        """
        except ValueError:
            date = header_lines[3].split()[-2]
            if "00" in date.split("/"):
                warnings.warn("Invalid logging start date given in header. Logger may have experienced power issues and"
                "data may be corrupt")"""

        line_7_info = header_lines[6].split(",")
        self.META["num channels"] = int(line_7_info[0].split()[-1])
        self.META["num samples"] = int(line_7_info[1].split()[-1])
        # Decimal places for reported temperatures, parsed from line 10.
        self.META["precision"] = int(header_lines[9].split("%")[1][-2])

        # One calibration-coefficient line per channel, starting at line 11.
        self.META["calibration parameters"] = {}
        calibration_start_line = 10
        for i in range(self.META["num channels"]):
            self.META["calibration parameters"][f"channel {i + 1}"] = {}
            line_num = calibration_start_line + i
            raw_calibration = header_lines[line_num].split()
            # Only calibration equation type "2" is implemented.
            if raw_calibration[1] != "2":
                raise ValueError(f"Calibration equation #{raw_calibration[1]} currently unsupported.")
            self.META["calibration parameters"][f"channel {i + 1}"]["a0"] = float(raw_calibration[2])
            self.META["calibration parameters"][f"channel {i + 1}"]["a1"] = float(raw_calibration[3])
            self.META["calibration parameters"][f"channel {i + 1}"]["a2"] = float(raw_calibration[4])
            if raw_calibration[5] == "0":
                self.META["calibration parameters"][f"channel {i + 1}"]["a3"] = 1
            else:
                # NOTE(review): a3 is read from field 2 (the same field as a0);
                # possibly intended to be field 5 -- confirm against the XL-800
                # calibration documentation before changing.
                self.META["calibration parameters"][f"channel {i + 1}"]["a3"] = float(raw_calibration[2])
        self.META['raw'] = "".join(header_lines)
        return

    def _read_daily_dat_format(self, raw_data: list):
        """Parse the "daily" .dat layout into ``self.DATA``.

        Parameters
        ----------
        raw_data
            Data lines (header already consumed). Blocks start with a
            "YYYY/MM/DD HH:MM:SS" stamp row; following rows are samples whose
            times are derived from the stamp plus the sample interval.

        Returns
        -------
        None
        """
        self.DATA = pd.DataFrame(columns=[f"channel {i + 1}" for i in range(self.META["num channels"])])
        for line in raw_data:
            if line != "\n":
                # A 20-char line, or one with 2 extra fields, is a date stamp
                # that resets the within-day interval counter.
                # NOTE(review): assumes a stamp row precedes the first sample
                # row; otherwise date_stamp is unbound -- confirm with data.
                if len(line) == 20 or len(line.split()) == self.META["num channels"] + 2:
                    date_stamp = dt.datetime.strptime(" ".join(line.split()[0:2]), "%Y/%m/%d %H:%M:%S")
                    interval_num = 0
                elif len(line.split()) == self.META["num channels"] + 1:
                    # Sample row with a leading line number: drop it.
                    self.DATA.loc[date_stamp + self.META["sample interval"] * interval_num] = line.split()[1:]
                    interval_num += 1
                else:
                    self.DATA.loc[date_stamp + self.META["sample interval"] * interval_num] = line.split()
                    interval_num += 1
        # Coerce text fields to numbers; unparseable cells become NaN.
        for col in self.DATA:
            self.DATA[col] = pd.to_numeric(self.DATA[col], errors='coerce')
        self.DATA.reset_index(inplace=True)
        self.DATA.rename(columns={"index": "TIME"}, inplace=True)
        return

    def _read_standard_hex_format(self, raw_data: list):
        """Decode .hex payload lines into temperatures in ``self.DATA``.

        Each sample is a 4-hex-character (16-bit) word per channel; the
        leading hex digit encodes sign/validity and the remaining three are
        the raw magnitude, converted via the per-channel calibration.
        """
        byte_list = []
        for line in raw_data:
            # Split each line into 4-character words; the final fragment
            # (trailing newline remnant) is discarded.
            eight_bytes = [line[i: i + 4] for i in range(0, len(line), 4)][:-1]
            for byte in eight_bytes:
                byte_list.append(byte)
        byte_num = 0
        self.DATA = pd.DataFrame(columns=[f"channel {i + 1}" for i in range(self.META["num channels"])])
        line_num = 0
        prev_line_day = 0
        for line in range(self.META["num samples"]):
            line_time = self.META["logging start"] + self.META["sample interval"] * line_num
            if line_time.day != prev_line_day:
                # Each new day is preceded by a 7-word date stamp: skip it.
                byte_num += 7
                prev_line_day = line_time.day
            line_bytes = byte_list[byte_num: byte_num + 8]
            line_temps = []
            for channel in range(len(line_bytes)):
                hex_val = line_bytes[channel]
                first_digit = hex_val[0]
                # "0" = negative reading, "2" = positive, "1"/"3" = invalid.
                # NOTE(review): any other leading digit leaves data_val unset
                # (stale or unbound) -- confirm whether that can occur.
                if first_digit == "0":
                    data_val = -int(hex_val[1:], 16)
                if first_digit == "2":
                    data_val = int(hex_val[1:], 16)
                elif first_digit in ["1", "3"]:
                    data_val = np.nan
                if not np.isnan(data_val) and data_val > 0:
                    a0 = self.META["calibration parameters"][f"channel {channel + 1}"]["a0"]
                    a1 = self.META["calibration parameters"][f"channel {channel + 1}"]["a1"]
                    a2 = self.META["calibration parameters"][f"channel {channel + 1}"]["a2"]
                    a3 = self.META["calibration parameters"][f"channel {channel + 1}"]["a3"]
                    # Convert the raw count to a resistance ratio, then to
                    # temperature, rounded to the header-specified precision.
                    y = a2 * ((2048 * (a3 / data_val)) - 1)
                    temp = (a1 / ((a1 / 273.15) - np.log(a0 / y))) - 273.15
                    line_temps.append(round(temp, self.META["precision"]))
                else:
                    line_temps.append(np.nan)
            self.DATA.loc[line_time] = line_temps
            byte_num += 8
            line_num += 1
        # Coerce to numeric dtype; invalid cells become NaN.
        for col in self.DATA:
            self.DATA[col] = pd.to_numeric(self.DATA[col], errors='coerce')
        self.DATA.reset_index(inplace=True)
        self.DATA.rename(columns={"index": "TIME"}, inplace=True)
        return

    def _read_standard_dat_format(self, raw_data: list, line_numbers=False):
        """Parse the standard .dat layout into ``self.DATA``.

        Parameters
        ----------
        raw_data
            Data lines (header already consumed), one sample per line.
        line_numbers
            If True, each row begins with a line number that is dropped.

        Returns
        -------
        None
        """
        self.DATA = pd.DataFrame(columns=[f"channel {i + 1}" for i in range(self.META["num channels"])])
        line_num = 0
        for line in raw_data:
            line_data = line.split()
            if line_numbers:
                line_data = line_data[1:]
            # Row timestamps are reconstructed from the header's start time
            # and sampling interval.
            self.DATA.loc[self.META["logging start"] + self.META["sample interval"] * line_num] = line_data
            line_num += 1
        # Coerce text fields to numbers; unparseable cells become NaN.
        for col in self.DATA:
            self.DATA[col] = pd.to_numeric(self.DATA[col], errors='coerce')
        self.DATA.reset_index(inplace=True)
        self.DATA.rename(columns={"index": "TIME"}, inplace=True)
        return
|
1
|
+
import pathlib
|
|
2
|
+
import warnings
|
|
3
|
+
import numpy as np
|
|
4
|
+
import pandas as pd
|
|
5
|
+
import datetime as dt
|
|
6
|
+
from .AbstractReader import AbstractReader
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class RBRXL800(AbstractReader):
    """Reader for RBR XL-800 data logger export files (.dat or .hex)."""

    def read(self, file_path: str) -> "pd.DataFrame":
        """Read an RBR XL-800 export file into ``self.DATA`` and return it.

        Parameters
        ----------
        file_path : str
            Path to a ``.dat`` or ``.hex`` file exported from the logger.

        Returns
        -------
        pd.DataFrame
            One "channel N" column per logger channel plus a "TIME" column.

        Raises
        ------
        IOError
            If the file extension is neither ``.dat`` nor ``.hex``.
        RuntimeError
            If a ``.dat`` file's column layout matches no known pattern.
        """
        file_extention = pathlib.Path(file_path).suffix.lower()
        if file_extention not in [".dat", ".hex"]:
            raise IOError("Unrecognised file. File is not a .dat or .hex")

        with open(file_path, "r") as f:
            # The first 18 lines are the fixed-size instrument header.
            header_lines = [next(f) for _ in range(18)]
            self._parse_meta(header_lines)

            data_lines = f.readlines()
            if file_extention == ".dat":
                first_fields = len(data_lines[0].split())
                # A blank first line, or a first row holding date + time plus
                # all channel values, marks the "daily" layout (one date stamp
                # per block of sample rows).
                if data_lines[0] == "\n" or first_fields == self.META["num channels"] + 2:
                    self._read_daily_dat_format(data_lines)
                elif first_fields == 1 + self.META["num channels"]:
                    # Standard layout with a leading per-row line number.
                    self._read_standard_dat_format(data_lines, True)
                elif first_fields == self.META["num channels"]:
                    self._read_standard_dat_format(data_lines, False)
                else:
                    # Fixed: original message concatenated to "anyexpected".
                    raise RuntimeError("Error: Number of column names and number of columns do not match any "
                                       "expected pattern.")

            elif file_extention == ".hex":
                # The first payload line ends with the total byte count.
                self.META["num bytes"] = int(data_lines[0].split()[-1])
                data_lines = data_lines[1:]
                self._read_standard_hex_format(data_lines)

        # Sanity check: compare rows read against the header's sample count.
        if len(self.DATA.index) != self.META["num samples"]:
            warnings.warn(f"{file_path} Mismatch between number of samples in specified header "
                          f"({self.META['num samples']}) and number of samples read {len(self.DATA.index)}. Some "
                          "data may be missing")
        return self.DATA

    def _parse_meta(self, header_lines: list):
        """Populate ``self.META`` from the 18-line instrument header.

        Parameters
        ----------
        header_lines : list
            The first 18 lines of the export file, in file order.

        Raises
        ------
        ValueError
            If a channel uses a calibration equation other than type "2".
        """
        self.META["logger model"] = header_lines[0].split()[1]
        self.META["logger SN"] = header_lines[0].split()[3]
        # Line 6 ends with the sample interval formatted as HH:MM:SS.
        sample_interval = dt.datetime.strptime(header_lines[5].split()[-1], "%H:%M:%S")
        self.META["sample interval"] = dt.timedelta(hours=sample_interval.hour, minutes=sample_interval.minute,
                                                    seconds=sample_interval.second)
        # Line 4 ends with "yy/mm/dd HH:MM:SS". A start date with "00" fields
        # would indicate logger power problems; a ValueError handler for that
        # case was previously drafted here but is not currently implemented.
        self.META["logging start"] = dt.datetime.strptime(" ".join(header_lines[3].split()[-2:]), "%y/%m/%d %H:%M:%S")

        line_7_info = header_lines[6].split(",")
        self.META["num channels"] = int(line_7_info[0].split()[-1])
        self.META["num samples"] = int(line_7_info[1].split()[-1])
        # Decimal places for reported temperatures, parsed from line 10.
        self.META["precision"] = int(header_lines[9].split("%")[1][-2])

        # One calibration-coefficient line per channel, starting at line 11.
        self.META["calibration parameters"] = {}
        calibration_start_line = 10
        for i in range(self.META["num channels"]):
            raw_calibration = header_lines[calibration_start_line + i].split()
            # Only calibration equation type "2" is implemented.
            if raw_calibration[1] != "2":
                raise ValueError(f"Calibration equation #{raw_calibration[1]} currently unsupported.")
            channel_cal = {
                "a0": float(raw_calibration[2]),
                "a1": float(raw_calibration[3]),
                "a2": float(raw_calibration[4]),
            }
            if raw_calibration[5] == "0":
                channel_cal["a3"] = 1
            else:
                # NOTE(review): a3 is read from field 2 (the same field as a0);
                # possibly intended to be field 5 -- confirm against the XL-800
                # calibration documentation before changing. Behavior kept.
                channel_cal["a3"] = float(raw_calibration[2])
            self.META["calibration parameters"][f"channel {i + 1}"] = channel_cal
        self.META['raw'] = "".join(header_lines)
        return

    def _read_daily_dat_format(self, raw_data: list):
        """Parse the "daily" .dat layout into ``self.DATA``.

        Parameters
        ----------
        raw_data
            Data lines (header already consumed). Blocks start with a
            "YYYY/MM/DD HH:MM:SS" stamp row; following rows are samples whose
            times are derived from the stamp plus the sample interval.

        Returns
        -------
        None
        """
        self.DATA = pd.DataFrame(columns=[f"channel {i + 1}" for i in range(self.META["num channels"])])
        for line in raw_data:
            if line == "\n":
                continue
            fields = line.split()
            # A 20-char line, or one with 2 extra fields, is a date stamp
            # that resets the within-day interval counter.
            # NOTE(review): assumes a stamp row precedes the first sample row;
            # otherwise date_stamp is unbound -- confirm with real files.
            if len(line) == 20 or len(fields) == self.META["num channels"] + 2:
                date_stamp = dt.datetime.strptime(" ".join(fields[0:2]), "%Y/%m/%d %H:%M:%S")
                interval_num = 0
            else:
                if len(fields) == self.META["num channels"] + 1:
                    # Sample row with a leading line number: drop it.
                    fields = fields[1:]
                self.DATA.loc[date_stamp + self.META["sample interval"] * interval_num] = fields
                interval_num += 1
        self._finalize_data()
        return

    def _read_standard_hex_format(self, raw_data: list):
        """Decode .hex payload lines into temperatures in ``self.DATA``.

        Each sample is a 4-hex-character (16-bit) word per channel; the
        leading hex digit encodes sign/validity and the remaining three are
        the raw magnitude, converted via the per-channel calibration.
        """
        word_list = []
        for line in raw_data:
            # Split each line into 4-character words; the final fragment
            # (trailing newline remnant) is discarded.
            word_list.extend([line[i: i + 4] for i in range(0, len(line), 4)][:-1])

        self.DATA = pd.DataFrame(columns=[f"channel {i + 1}" for i in range(self.META["num channels"])])
        word_num = 0
        prev_sample_day = 0
        for sample_num in range(self.META["num samples"]):
            sample_time = self.META["logging start"] + self.META["sample interval"] * sample_num
            if sample_time.day != prev_sample_day:
                # Each new day is preceded by a 7-word date stamp: skip it.
                word_num += 7
                prev_sample_day = sample_time.day
            sample_words = word_list[word_num: word_num + 8]
            sample_temps = []
            for channel, hex_val in enumerate(sample_words):
                # Leading digit: "0" = negative reading, "2" = positive,
                # "1"/"3" = invalid sample.
                first_digit = hex_val[0]
                if first_digit == "0":
                    data_val = -int(hex_val[1:], 16)
                elif first_digit == "2":
                    data_val = int(hex_val[1:], 16)
                else:
                    # Fixed: any digit outside {"0","2"} now yields NaN.
                    # Previously an unexpected digit left data_val unassigned,
                    # causing an UnboundLocalError or silently reusing the
                    # previous channel's value.
                    data_val = np.nan
                if not np.isnan(data_val) and data_val > 0:
                    cal = self.META["calibration parameters"][f"channel {channel + 1}"]
                    # Convert the raw count to a resistance ratio, then to
                    # temperature, rounded to the header-specified precision.
                    y = cal["a2"] * ((2048 * (cal["a3"] / data_val)) - 1)
                    temp = (cal["a1"] / ((cal["a1"] / 273.15) - np.log(cal["a0"] / y))) - 273.15
                    sample_temps.append(round(temp, self.META["precision"]))
                else:
                    sample_temps.append(np.nan)
            self.DATA.loc[sample_time] = sample_temps
            word_num += 8
        self._finalize_data()
        return

    def _read_standard_dat_format(self, raw_data: list, line_numbers=False):
        """Parse the standard .dat layout into ``self.DATA``.

        Parameters
        ----------
        raw_data
            Data lines (header already consumed), one sample per line.
        line_numbers
            If True, each row begins with a line number that is dropped.

        Returns
        -------
        None
        """
        self.DATA = pd.DataFrame(columns=[f"channel {i + 1}" for i in range(self.META["num channels"])])
        for row_num, line in enumerate(raw_data):
            line_data = line.split()
            if line_numbers:
                line_data = line_data[1:]
            # Row timestamps are reconstructed from the header's start time
            # and sampling interval.
            self.DATA.loc[self.META["logging start"] + self.META["sample interval"] * row_num] = line_data
        self._finalize_data()
        return

    def _finalize_data(self):
        """Coerce ``self.DATA`` columns to numeric (invalid cells -> NaN) and
        move the timestamp index into a "TIME" column."""
        for col in self.DATA:
            self.DATA[col] = pd.to_numeric(self.DATA[col], errors='coerce')
        self.DATA.reset_index(inplace=True)
        self.DATA.rename(columns={"index": "TIME"}, inplace=True)