tsp 1.8.1__py3-none-any.whl → 1.10.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (127)
  1. tsp/__init__.py +11 -11
  2. tsp/__meta__.py +1 -1
  3. tsp/concatenation.py +159 -153
  4. tsp/core.py +1306 -1162
  5. tsp/data/2023-01-06_755-test-Dataset_2031-Constant_Over_Interval-Hourly-Ground_Temperature-Thermistor_Automated.timeserie.csv +4 -4
  6. tsp/data/2023-01-06_755-test.metadata.txt +208 -208
  7. tsp/data/NTGS_example_csv.csv +6 -6
  8. tsp/data/NTGS_example_slash_dates.csv +6 -6
  9. tsp/data/NTGS_gtr_example_excel.xlsx +0 -0
  10. tsp/data/example_geotop.csv +5240 -5240
  11. tsp/data/example_gtnp.csv +1298 -1298
  12. tsp/data/example_permos.csv +7 -7
  13. tsp/data/ntgs-db-multi.txt +3872 -0
  14. tsp/data/ntgs-db-single.txt +2251 -0
  15. tsp/data/test_geotop_has_space.txt +5 -5
  16. tsp/data/tsp_format_long.csv +10 -0
  17. tsp/data/tsp_format_wide_1.csv +7 -0
  18. tsp/data/tsp_format_wide_2.csv +7 -0
  19. tsp/dataloggers/AbstractReader.py +43 -43
  20. tsp/dataloggers/FG2.py +110 -110
  21. tsp/dataloggers/GP5W.py +114 -114
  22. tsp/dataloggers/Geoprecision.py +34 -34
  23. tsp/dataloggers/HOBO.py +930 -914
  24. tsp/dataloggers/RBRXL800.py +190 -190
  25. tsp/dataloggers/RBRXR420.py +371 -308
  26. tsp/dataloggers/Vemco.py +84 -0
  27. tsp/dataloggers/__init__.py +15 -15
  28. tsp/dataloggers/logr.py +196 -115
  29. tsp/dataloggers/test_files/004448.DAT +2543 -2543
  30. tsp/dataloggers/test_files/004531.DAT +17106 -17106
  31. tsp/dataloggers/test_files/004531.HEX +3587 -3587
  32. tsp/dataloggers/test_files/004534.HEX +3587 -3587
  33. tsp/dataloggers/test_files/010252.dat +1731 -1731
  34. tsp/dataloggers/test_files/010252.hex +1739 -1739
  35. tsp/dataloggers/test_files/010274.hex +1291 -1291
  36. tsp/dataloggers/test_files/010278.hex +3544 -3544
  37. tsp/dataloggers/test_files/012064.dat +1286 -1286
  38. tsp/dataloggers/test_files/012064.hex +1294 -1294
  39. tsp/dataloggers/test_files/012064_modified_start.hex +1294 -0
  40. tsp/dataloggers/test_files/012081.hex +3532 -3532
  41. tsp/dataloggers/test_files/013138_recovery_stamp.hex +1123 -0
  42. tsp/dataloggers/test_files/014037-2007.hex +95 -0
  43. tsp/dataloggers/test_files/019360_20160918_1146_SlumpIslandTopofHill.hex +11253 -0
  44. tsp/dataloggers/test_files/019360_20160918_1146_SlumpIslandTopofHill.xls +0 -0
  45. tsp/dataloggers/test_files/07B1592.DAT +1483 -1483
  46. tsp/dataloggers/test_files/07B1592.HEX +1806 -1806
  47. tsp/dataloggers/test_files/07B4450.DAT +2234 -2234
  48. tsp/dataloggers/test_files/07B4450.HEX +2559 -2559
  49. tsp/dataloggers/test_files/2022018_2025-09-18T22-16-16.txt +36 -0
  50. tsp/dataloggers/test_files/2022018_2025-09-18T22-16-16_raw.csv +2074 -0
  51. tsp/dataloggers/test_files/2022018_2025-09-18T22-16-16_temp.csv +2074 -0
  52. tsp/dataloggers/test_files/2025004_2025-12-02T17-07-28_cfg.txt +30 -0
  53. tsp/dataloggers/test_files/2025004_2025-12-02T17-07-28_raw.csv +35 -0
  54. tsp/dataloggers/test_files/2025004_2025-12-02T17-07-28_temp.csv +35 -0
  55. tsp/dataloggers/test_files/204087.xlsx +0 -0
  56. tsp/dataloggers/test_files/Asc-1455As02.000 +2982 -0
  57. tsp/dataloggers/test_files/Asc-1456As02.000 +2992 -0
  58. tsp/dataloggers/test_files/Asc-1457As02.000 +2917 -0
  59. tsp/dataloggers/test_files/BGC_BH15_019362_20140610_1253.hex +1729 -0
  60. tsp/dataloggers/test_files/Bin2944.csv +759 -0
  61. tsp/dataloggers/test_files/Bin5494.csv +2972 -0
  62. tsp/dataloggers/test_files/Bin6786.csv +272 -0
  63. tsp/dataloggers/test_files/FG2_399.csv +9881 -9881
  64. tsp/dataloggers/test_files/GP5W.csv +1121 -1121
  65. tsp/dataloggers/test_files/GP5W_260.csv +1884 -1884
  66. tsp/dataloggers/test_files/GP5W_270.csv +2210 -2210
  67. tsp/dataloggers/test_files/H08-030-08_HOBOware.csv +998 -998
  68. tsp/dataloggers/test_files/Minilog-II-T_350763_20190711_1.csv +2075 -0
  69. tsp/dataloggers/test_files/Minilog-II-T_350769_20190921_1.csv +6384 -0
  70. tsp/dataloggers/test_files/Minilog-II-T_354284_20190921_1.csv +4712 -0
  71. tsp/dataloggers/test_files/Minilog-T_7943_20140920_1.csv +5826 -0
  72. tsp/dataloggers/test_files/Minilog-T_8979_20140806_1.csv +2954 -0
  73. tsp/dataloggers/test_files/Minilog-T_975_20110824_1.csv +4343 -0
  74. tsp/dataloggers/test_files/RBR_01.dat +1046 -1046
  75. tsp/dataloggers/test_files/RBR_02.dat +2426 -2426
  76. tsp/dataloggers/test_files/RI03b_062831_20240905_1801.rsk +0 -0
  77. tsp/dataloggers/test_files/RI03b_062831_20240905_1801.xlsx +0 -0
  78. tsp/dataloggers/test_files/RSTDT2055.csv +2152 -2152
  79. tsp/dataloggers/test_files/U23-001_HOBOware.csv +1001 -1001
  80. tsp/dataloggers/test_files/hobo-negative-2.txt +6396 -6396
  81. tsp/dataloggers/test_files/hobo-negative-3.txt +5593 -5593
  82. tsp/dataloggers/test_files/hobo-positive-number-1.txt +1000 -1000
  83. tsp/dataloggers/test_files/hobo-positive-number-2.csv +1003 -1003
  84. tsp/dataloggers/test_files/hobo-positive-number-3.csv +1133 -1133
  85. tsp/dataloggers/test_files/hobo-positive-number-4.csv +1209 -1209
  86. tsp/dataloggers/test_files/hobo2.csv +8702 -8702
  87. tsp/dataloggers/test_files/hobo_1_AB.csv +21732 -21732
  88. tsp/dataloggers/test_files/hobo_1_AB_Details.txt +133 -133
  89. tsp/dataloggers/test_files/hobo_1_AB_classic.csv +4373 -4373
  90. tsp/dataloggers/test_files/hobo_1_AB_defaults.csv +21732 -21732
  91. tsp/dataloggers/test_files/hobo_1_AB_minimal.txt +1358 -1358
  92. tsp/dataloggers/test_files/hobo_1_AB_var2.csv +3189 -3189
  93. tsp/dataloggers/test_files/hobo_1_AB_var3.csv +2458 -2458
  94. tsp/dataloggers/test_files/logR_ULogC16-32_1.csv +106 -106
  95. tsp/dataloggers/test_files/logR_ULogC16-32_2.csv +100 -100
  96. tsp/dataloggers/test_files/mon_3_Ta_2010-08-18_2013-02-08.txt +21724 -21724
  97. tsp/dataloggers/test_files/rbr_001.dat +1133 -1133
  98. tsp/dataloggers/test_files/rbr_001.hex +1139 -1139
  99. tsp/dataloggers/test_files/rbr_001_no_comment.dat +1132 -1132
  100. tsp/dataloggers/test_files/rbr_001_no_comment.hex +1138 -1138
  101. tsp/dataloggers/test_files/rbr_002.dat +1179 -1179
  102. tsp/dataloggers/test_files/rbr_002.hex +1185 -1185
  103. tsp/dataloggers/test_files/rbr_003.hex +1292 -1292
  104. tsp/dataloggers/test_files/rbr_xl_001.DAT +1105 -1105
  105. tsp/dataloggers/test_files/rbr_xl_002.DAT +1126 -1126
  106. tsp/dataloggers/test_files/rbr_xl_003.DAT +4622 -4622
  107. tsp/dataloggers/test_files/rbr_xl_003.HEX +3587 -3587
  108. tsp/gtnp.py +148 -148
  109. tsp/labels.py +3 -3
  110. tsp/misc.py +90 -90
  111. tsp/physics.py +101 -101
  112. tsp/plots/static.py +388 -374
  113. tsp/readers.py +829 -548
  114. tsp/standardization/__init__.py +0 -0
  115. tsp/standardization/metadata.py +95 -0
  116. tsp/standardization/metadata_ref.py +0 -0
  117. tsp/standardization/validator.py +535 -0
  118. tsp/time.py +45 -45
  119. tsp/tspwarnings.py +27 -15
  120. tsp/utils.py +131 -101
  121. tsp/version.py +1 -1
  122. {tsp-1.8.1.dist-info → tsp-1.10.2.dist-info}/METADATA +95 -86
  123. tsp-1.10.2.dist-info/RECORD +132 -0
  124. {tsp-1.8.1.dist-info → tsp-1.10.2.dist-info}/licenses/LICENSE +674 -674
  125. {tsp-1.8.1.dist-info → tsp-1.10.2.dist-info}/top_level.txt +1 -0
  126. tsp-1.8.1.dist-info/RECORD +0 -94
  127. {tsp-1.8.1.dist-info → tsp-1.10.2.dist-info}/WHEEL +0 -0
@@ -1,190 +1,190 @@
1
- import pathlib
2
- import warnings
3
- import numpy as np
4
- import pandas as pd
5
- import datetime as dt
6
- from .AbstractReader import AbstractReader
7
-
8
-
9
- class RBRXL800(AbstractReader):
10
-
11
- def read(self, file_path: str) -> "pd.DataFrame":
12
- """
13
-
14
- Parameters
15
- ----------
16
- file
17
-
18
- Returns
19
- -------
20
-
21
- """
22
- file_extention = pathlib.Path(file_path).suffix.lower()
23
- if file_extention not in [".dat", ".hex"]:
24
- raise IOError("Unrecognised file. File is not a .dat or .hex")
25
-
26
- with open(file_path, "r") as f:
27
- header_lines = [next(f) for i in range(18)]
28
- self._parse_meta(header_lines)
29
-
30
- data_lines = f.readlines()
31
- if file_extention == ".dat":
32
- if data_lines[0] == "\n" or len(data_lines[0].split()) == self.META["num channels"] + 2:
33
- self._read_daily_dat_format(data_lines)
34
- else:
35
- if len(data_lines[0].split()) == 1 + self.META["num channels"]:
36
- self._read_standard_dat_format(data_lines, True)
37
- elif len(data_lines[0].split()) == self.META["num channels"]:
38
- self._read_standard_dat_format(data_lines, False)
39
- else:
40
- raise RuntimeError("Error: Number of column names and number of columns do not match any"
41
- "expected pattern.")
42
-
43
- elif file_extention == ".hex":
44
- self.META["num bytes"] = int(data_lines[0].split()[-1])
45
- data_lines = data_lines[1:]
46
- self._read_standard_hex_format(data_lines)
47
-
48
- if len(self.DATA.index) != self.META["num samples"]:
49
- warnings.warn(f"{file_path} Mismatch between number of samples in specified header "
50
- f"({self.META['num samples']}) and number of samples read {len(self.DATA.index)}. Some "
51
- "data may be missing")
52
- return self.DATA
53
-
54
- def _parse_meta(self, header_lines: list):
55
- self.META["logger model"] = header_lines[0].split()[1]
56
- self.META["logger SN"] = header_lines[0].split()[3]
57
- sample_interval = dt.datetime.strptime(header_lines[5].split()[-1], "%H:%M:%S")
58
- self.META["sample interval"] = dt.timedelta(hours=sample_interval.hour, minutes=sample_interval.minute,
59
- seconds=sample_interval.second)
60
- # try:
61
- self.META["logging start"] = dt.datetime.strptime(" ".join(header_lines[3].split()[-2:]), "%y/%m/%d %H:%M:%S")
62
- """
63
- except ValueError:
64
- date = header_lines[3].split()[-2]
65
- if "00" in date.split("/"):
66
- warnings.warn("Invalid logging start date given in header. Logger may have experienced power issues and"
67
- "data may be corrupt")"""
68
-
69
- line_7_info = header_lines[6].split(",")
70
- self.META["num channels"] = int(line_7_info[0].split()[-1])
71
- self.META["num samples"] = int(line_7_info[1].split()[-1])
72
- self.META["precision"] = int(header_lines[9].split("%")[1][-2])
73
-
74
- self.META["calibration parameters"] = {}
75
- calibration_start_line = 10
76
- for i in range(self.META["num channels"]):
77
- self.META["calibration parameters"][f"channel {i + 1}"] = {}
78
- line_num = calibration_start_line + i
79
- raw_calibration = header_lines[line_num].split()
80
- if raw_calibration[1] != "2":
81
- raise ValueError(f"Calibration equation #{raw_calibration[1]} currently unsupported.")
82
- self.META["calibration parameters"][f"channel {i + 1}"]["a0"] = float(raw_calibration[2])
83
- self.META["calibration parameters"][f"channel {i + 1}"]["a1"] = float(raw_calibration[3])
84
- self.META["calibration parameters"][f"channel {i + 1}"]["a2"] = float(raw_calibration[4])
85
- if raw_calibration[5] == "0":
86
- self.META["calibration parameters"][f"channel {i + 1}"]["a3"] = 1
87
- else:
88
- self.META["calibration parameters"][f"channel {i + 1}"]["a3"] = float(raw_calibration[2])
89
- self.META['raw'] = "".join(header_lines)
90
- return
91
-
92
- def _read_daily_dat_format(self, raw_data: list):
93
- """
94
-
95
- Parameters
96
- ----------
97
- raw_data
98
-
99
- Returns
100
- -------
101
-
102
- """
103
- self.DATA = pd.DataFrame(columns=[f"channel {i + 1}" for i in range(self.META["num channels"])])
104
- for line in raw_data:
105
- if line != "\n":
106
- if len(line) == 20 or len(line.split()) == self.META["num channels"] + 2:
107
- date_stamp = dt.datetime.strptime(" ".join(line.split()[0:2]), "%Y/%m/%d %H:%M:%S")
108
- interval_num = 0
109
- elif len(line.split()) == self.META["num channels"] + 1:
110
- self.DATA.loc[date_stamp + self.META["sample interval"] * interval_num] = line.split()[1:]
111
- interval_num += 1
112
- else:
113
- self.DATA.loc[date_stamp + self.META["sample interval"] * interval_num] = line.split()
114
- interval_num += 1
115
- for col in self.DATA:
116
- self.DATA[col] = pd.to_numeric(self.DATA[col], errors='coerce')
117
- self.DATA.reset_index(inplace=True)
118
- self.DATA.rename(columns={"index": "TIME"}, inplace=True)
119
- return
120
-
121
- def _read_standard_hex_format(self, raw_data: list):
122
- byte_list = []
123
- for line in raw_data:
124
- eight_bytes = [line[i: i + 4] for i in range(0, len(line), 4)][:-1]
125
- for byte in eight_bytes:
126
- byte_list.append(byte)
127
- byte_num = 0
128
- self.DATA = pd.DataFrame(columns=[f"channel {i + 1}" for i in range(self.META["num channels"])])
129
- line_num = 0
130
- prev_line_day = 0
131
- for line in range(self.META["num samples"]):
132
- line_time = self.META["logging start"] + self.META["sample interval"] * line_num
133
- if line_time.day != prev_line_day:
134
- byte_num += 7
135
- prev_line_day = line_time.day
136
- line_bytes = byte_list[byte_num: byte_num + 8]
137
- line_temps = []
138
- for channel in range(len(line_bytes)):
139
- hex_val = line_bytes[channel]
140
- first_digit = hex_val[0]
141
- if first_digit == "0":
142
- data_val = -int(hex_val[1:], 16)
143
- if first_digit == "2":
144
- data_val = int(hex_val[1:], 16)
145
- elif first_digit in ["1", "3"]:
146
- data_val = np.nan
147
- if not np.isnan(data_val) and data_val > 0:
148
- a0 = self.META["calibration parameters"][f"channel {channel + 1}"]["a0"]
149
- a1 = self.META["calibration parameters"][f"channel {channel + 1}"]["a1"]
150
- a2 = self.META["calibration parameters"][f"channel {channel + 1}"]["a2"]
151
- a3 = self.META["calibration parameters"][f"channel {channel + 1}"]["a3"]
152
- y = a2 * ((2048 * (a3 / data_val)) - 1)
153
- temp = (a1 / ((a1 / 273.15) - np.log(a0 / y))) - 273.15
154
- line_temps.append(round(temp, self.META["precision"]))
155
- else:
156
- line_temps.append(np.nan)
157
- self.DATA.loc[line_time] = line_temps
158
- byte_num += 8
159
- line_num += 1
160
- for col in self.DATA:
161
- self.DATA[col] = pd.to_numeric(self.DATA[col], errors='coerce')
162
- self.DATA.reset_index(inplace=True)
163
- self.DATA.rename(columns={"index": "TIME"}, inplace=True)
164
- return
165
-
166
- def _read_standard_dat_format(self, raw_data: list, line_numbers=False):
167
- """
168
-
169
- Parameters
170
- ----------
171
- raw_data
172
- line_numbers
173
-
174
- Returns
175
- -------
176
-
177
- """
178
- self.DATA = pd.DataFrame(columns=[f"channel {i + 1}" for i in range(self.META["num channels"])])
179
- line_num = 0
180
- for line in raw_data:
181
- line_data = line.split()
182
- if line_numbers:
183
- line_data = line_data[1:]
184
- self.DATA.loc[self.META["logging start"] + self.META["sample interval"] * line_num] = line_data
185
- line_num += 1
186
- for col in self.DATA:
187
- self.DATA[col] = pd.to_numeric(self.DATA[col], errors='coerce')
188
- self.DATA.reset_index(inplace=True)
189
- self.DATA.rename(columns={"index": "TIME"}, inplace=True)
190
- return
1
+ import pathlib
2
+ import warnings
3
+ import numpy as np
4
+ import pandas as pd
5
+ import datetime as dt
6
+ from .AbstractReader import AbstractReader
7
+
8
+
9
+ class RBRXL800(AbstractReader):
10
+
11
+ def read(self, file_path: str) -> "pd.DataFrame":
12
+ """
13
+
14
+ Parameters
15
+ ----------
16
+ file
17
+
18
+ Returns
19
+ -------
20
+
21
+ """
22
+ file_extention = pathlib.Path(file_path).suffix.lower()
23
+ if file_extention not in [".dat", ".hex"]:
24
+ raise IOError("Unrecognised file. File is not a .dat or .hex")
25
+
26
+ with open(file_path, "r") as f:
27
+ header_lines = [next(f) for i in range(18)]
28
+ self._parse_meta(header_lines)
29
+
30
+ data_lines = f.readlines()
31
+ if file_extention == ".dat":
32
+ if data_lines[0] == "\n" or len(data_lines[0].split()) == self.META["num_channels"] + 2:
33
+ self._read_daily_dat_format(data_lines)
34
+ else:
35
+ if len(data_lines[0].split()) == 1 + self.META["num_channels"]:
36
+ self._read_standard_dat_format(data_lines, True)
37
+ elif len(data_lines[0].split()) == self.META["num_channels"]:
38
+ self._read_standard_dat_format(data_lines, False)
39
+ else:
40
+ raise RuntimeError("Error: Number of column names and number of columns do not match any"
41
+ "expected pattern.")
42
+
43
+ elif file_extention == ".hex":
44
+ self.META["num_bytes"] = int(data_lines[0].split()[-1])
45
+ data_lines = data_lines[1:]
46
+ self._read_standard_hex_format(data_lines)
47
+
48
+ if len(self.DATA.index) != self.META["num_samples"]:
49
+ warnings.warn(f"{file_path} Mismatch between number of samples in specified header "
50
+ f"({self.META['num_samples']}) and number of samples read {len(self.DATA.index)}. Some "
51
+ "data may be missing")
52
+ return self.DATA
53
+
54
+ def _parse_meta(self, header_lines: list):
55
+ self.META["logger_model"] = header_lines[0].split()[1]
56
+ self.META["logger_sn"] = header_lines[0].split()[3]
57
+ sample_interval = dt.datetime.strptime(header_lines[5].split()[-1], "%H:%M:%S")
58
+ self.META["sample_interval"] = dt.timedelta(hours=sample_interval.hour, minutes=sample_interval.minute,
59
+ seconds=sample_interval.second)
60
+ # try:
61
+ self.META["logging_start"] = dt.datetime.strptime(" ".join(header_lines[3].split()[-2:]), "%y/%m/%d %H:%M:%S")
62
+ """
63
+ except ValueError:
64
+ date = header_lines[3].split()[-2]
65
+ if "00" in date.split("/"):
66
+ warnings.warn("Invalid logging start date given in header. Logger may have experienced power issues and"
67
+ "data may be corrupt")"""
68
+
69
+ line_7_info = header_lines[6].split(",")
70
+ self.META["num_channels"] = int(line_7_info[0].split()[-1])
71
+ self.META["num_samples"] = int(line_7_info[1].split()[-1])
72
+ self.META["precision"] = int(header_lines[9].split("%")[1][-2])
73
+
74
+ self.META["calibration_parameters"] = {}
75
+ calibration_start_line = 10
76
+ for i in range(self.META["num_channels"]):
77
+ self.META["calibration_parameters"][f"channel_{i + 1}"] = {}
78
+ line_num = calibration_start_line + i
79
+ raw_calibration = header_lines[line_num].split()
80
+ if raw_calibration[1] != "2":
81
+ raise ValueError(f"Calibration equation #{raw_calibration[1]} currently unsupported.")
82
+ self.META["calibration_parameters"][f"channel_{i + 1}"]["a0"] = float(raw_calibration[2])
83
+ self.META["calibration_parameters"][f"channel_{i + 1}"]["a1"] = float(raw_calibration[3])
84
+ self.META["calibration_parameters"][f"channel_{i + 1}"]["a2"] = float(raw_calibration[4])
85
+ if raw_calibration[5] == "0":
86
+ self.META["calibration_parameters"][f"channel_{i + 1}"]["a3"] = 1
87
+ else:
88
+ self.META["calibration_parameters"][f"channel_{i + 1}"]["a3"] = float(raw_calibration[2])
89
+ self.META['raw'] = "".join(header_lines)
90
+ return
91
+
92
+ def _read_daily_dat_format(self, raw_data: list):
93
+ """
94
+
95
+ Parameters
96
+ ----------
97
+ raw_data
98
+
99
+ Returns
100
+ -------
101
+
102
+ """
103
+ self.DATA = pd.DataFrame(columns=[f"channel_{i + 1}" for i in range(self.META["num_channels"])])
104
+ for line in raw_data:
105
+ if line != "\n":
106
+ if len(line) == 20 or len(line.split()) == self.META["num_channels"] + 2:
107
+ date_stamp = dt.datetime.strptime(" ".join(line.split()[0:2]), "%Y/%m/%d %H:%M:%S")
108
+ interval_num = 0
109
+ elif len(line.split()) == self.META["num_channels"] + 1:
110
+ self.DATA.loc[date_stamp + self.META["sample_interval"] * interval_num] = line.split()[1:]
111
+ interval_num += 1
112
+ else:
113
+ self.DATA.loc[date_stamp + self.META["sample_interval"] * interval_num] = line.split()
114
+ interval_num += 1
115
+ for col in self.DATA:
116
+ self.DATA[col] = pd.to_numeric(self.DATA[col], errors='coerce')
117
+ self.DATA.reset_index(inplace=True)
118
+ self.DATA.rename(columns={"index": "TIME"}, inplace=True)
119
+ return
120
+
121
+ def _read_standard_hex_format(self, raw_data: list):
122
+ byte_list = []
123
+ for line in raw_data:
124
+ eight_bytes = [line[i: i + 4] for i in range(0, len(line), 4)][:-1]
125
+ for byte in eight_bytes:
126
+ byte_list.append(byte)
127
+ byte_num = 0
128
+ self.DATA = pd.DataFrame(columns=[f"channel_{i + 1}" for i in range(self.META["num_channels"])])
129
+ line_num = 0
130
+ prev_line_day = 0
131
+ for line in range(self.META["num_samples"]):
132
+ line_time = self.META["logging_start"] + self.META["sample_interval"] * line_num
133
+ if line_time.day != prev_line_day:
134
+ byte_num += 7
135
+ prev_line_day = line_time.day
136
+ line_bytes = byte_list[byte_num: byte_num + 8]
137
+ line_temps = []
138
+ for channel in range(len(line_bytes)):
139
+ hex_val = line_bytes[channel]
140
+ first_digit = hex_val[0]
141
+ if first_digit == "0":
142
+ data_val = -int(hex_val[1:], 16)
143
+ if first_digit == "2":
144
+ data_val = int(hex_val[1:], 16)
145
+ elif first_digit in ["1", "3"]:
146
+ data_val = np.nan
147
+ if not np.isnan(data_val) and data_val > 0:
148
+ a0 = self.META["calibration_parameters"][f"channel_{channel + 1}"]["a0"]
149
+ a1 = self.META["calibration_parameters"][f"channel_{channel + 1}"]["a1"]
150
+ a2 = self.META["calibration_parameters"][f"channel_{channel + 1}"]["a2"]
151
+ a3 = self.META["calibration_parameters"][f"channel_{channel + 1}"]["a3"]
152
+ y = a2 * ((2048 * (a3 / data_val)) - 1)
153
+ temp = (a1 / ((a1 / 273.15) - np.log(a0 / y))) - 273.15
154
+ line_temps.append(round(temp, self.META["precision"]))
155
+ else:
156
+ line_temps.append(np.nan)
157
+ self.DATA.loc[line_time] = line_temps
158
+ byte_num += 8
159
+ line_num += 1
160
+ for col in self.DATA:
161
+ self.DATA[col] = pd.to_numeric(self.DATA[col], errors='coerce')
162
+ self.DATA.reset_index(inplace=True)
163
+ self.DATA.rename(columns={"index": "TIME"}, inplace=True)
164
+ return
165
+
166
+ def _read_standard_dat_format(self, raw_data: list, line_numbers=False):
167
+ """
168
+
169
+ Parameters
170
+ ----------
171
+ raw_data
172
+ line_numbers
173
+
174
+ Returns
175
+ -------
176
+
177
+ """
178
+ self.DATA = pd.DataFrame(columns=[f"channel_{i + 1}" for i in range(self.META["num_channels"])])
179
+ line_num = 0
180
+ for line in raw_data:
181
+ line_data = line.split()
182
+ if line_numbers:
183
+ line_data = line_data[1:]
184
+ self.DATA.loc[self.META["logging_start"] + self.META["sample_interval"] * line_num] = line_data
185
+ line_num += 1
186
+ for col in self.DATA:
187
+ self.DATA[col] = pd.to_numeric(self.DATA[col], errors='coerce')
188
+ self.DATA.reset_index(inplace=True)
189
+ self.DATA.rename(columns={"index": "TIME"}, inplace=True)
190
+ return