tsp-1.7.3-py3-none-any.whl → tsp-1.7.7-py3-none-any.whl
This diff shows the changes between publicly available package versions as they appear in their respective public registries, and is provided for informational purposes only.
Potentially problematic release: this version of tsp has been flagged by the registry.
- tsp/__init__.py +11 -11
- tsp/__meta__.py +1 -1
- tsp/core.py +1035 -1010
- tsp/data/2023-01-06_755-test-Dataset_2031-Constant_Over_Interval-Hourly-Ground_Temperature-Thermistor_Automated.timeserie.csv +4 -4
- tsp/data/2023-01-06_755-test.metadata.txt +208 -208
- tsp/data/NTGS_example_csv.csv +6 -0
- tsp/data/NTGS_example_slash_dates.csv +6 -0
- tsp/data/example_geotop.csv +5240 -5240
- tsp/data/example_gtnp.csv +1298 -1298
- tsp/data/example_permos.csv +8 -0
- tsp/data/test_geotop_has_space.txt +5 -5
- tsp/dataloggers/AbstractReader.py +43 -43
- tsp/dataloggers/FG2.py +110 -110
- tsp/dataloggers/GP5W.py +114 -114
- tsp/dataloggers/Geoprecision.py +34 -34
- tsp/dataloggers/HOBO.py +914 -914
- tsp/dataloggers/RBRXL800.py +190 -190
- tsp/dataloggers/RBRXR420.py +308 -308
- tsp/dataloggers/__init__.py +15 -15
- tsp/dataloggers/logr.py +115 -115
- tsp/dataloggers/test_files/004448.DAT +2543 -2543
- tsp/dataloggers/test_files/004531.DAT +17106 -17106
- tsp/dataloggers/test_files/004531.HEX +3587 -3587
- tsp/dataloggers/test_files/004534.HEX +3587 -3587
- tsp/dataloggers/test_files/010252.dat +1731 -1731
- tsp/dataloggers/test_files/010252.hex +1739 -1739
- tsp/dataloggers/test_files/010274.hex +1291 -1291
- tsp/dataloggers/test_files/010278.hex +3544 -3544
- tsp/dataloggers/test_files/012064.dat +1286 -1286
- tsp/dataloggers/test_files/012064.hex +1294 -1294
- tsp/dataloggers/test_files/012081.hex +3532 -3532
- tsp/dataloggers/test_files/07B1592.DAT +1483 -1483
- tsp/dataloggers/test_files/07B1592.HEX +1806 -1806
- tsp/dataloggers/test_files/07B4450.DAT +2234 -2234
- tsp/dataloggers/test_files/07B4450.HEX +2559 -2559
- tsp/dataloggers/test_files/CSc_CR1000_1.dat +295 -0
- tsp/dataloggers/test_files/FG2_399.csv +9881 -9881
- tsp/dataloggers/test_files/GP5W.csv +1121 -1121
- tsp/dataloggers/test_files/GP5W_260.csv +1884 -1884
- tsp/dataloggers/test_files/GP5W_270.csv +2210 -2210
- tsp/dataloggers/test_files/H08-030-08_HOBOware.csv +998 -998
- tsp/dataloggers/test_files/RBR_01.dat +1046 -1046
- tsp/dataloggers/test_files/RBR_02.dat +2426 -2426
- tsp/dataloggers/test_files/RSTDT2055.csv +2152 -2152
- tsp/dataloggers/test_files/U23-001_HOBOware.csv +1001 -1001
- tsp/dataloggers/test_files/hobo-negative-2.txt +6396 -6396
- tsp/dataloggers/test_files/hobo-negative-3.txt +5593 -5593
- tsp/dataloggers/test_files/hobo-positive-number-1.txt +1000 -1000
- tsp/dataloggers/test_files/hobo-positive-number-2.csv +1003 -1003
- tsp/dataloggers/test_files/hobo-positive-number-3.csv +1133 -1133
- tsp/dataloggers/test_files/hobo-positive-number-4.csv +1209 -1209
- tsp/dataloggers/test_files/hobo2.csv +8702 -8702
- tsp/dataloggers/test_files/hobo_1_AB.csv +21732 -21732
- tsp/dataloggers/test_files/hobo_1_AB_Details.txt +133 -133
- tsp/dataloggers/test_files/hobo_1_AB_classic.csv +4373 -4373
- tsp/dataloggers/test_files/hobo_1_AB_defaults.csv +21732 -21732
- tsp/dataloggers/test_files/hobo_1_AB_minimal.txt +1358 -1358
- tsp/dataloggers/test_files/hobo_1_AB_var2.csv +3189 -3189
- tsp/dataloggers/test_files/hobo_1_AB_var3.csv +2458 -2458
- tsp/dataloggers/test_files/logR_ULogC16-32_1.csv +106 -106
- tsp/dataloggers/test_files/logR_ULogC16-32_2.csv +100 -100
- tsp/dataloggers/test_files/mon_3_Ta_2010-08-18_2013-02-08.txt +21724 -21724
- tsp/dataloggers/test_files/rbr_001.dat +1133 -1133
- tsp/dataloggers/test_files/rbr_001.hex +1139 -1139
- tsp/dataloggers/test_files/rbr_001_no_comment.dat +1132 -1132
- tsp/dataloggers/test_files/rbr_001_no_comment.hex +1138 -1138
- tsp/dataloggers/test_files/rbr_002.dat +1179 -1179
- tsp/dataloggers/test_files/rbr_002.hex +1185 -1185
- tsp/dataloggers/test_files/rbr_003.hex +1292 -1292
- tsp/dataloggers/test_files/rbr_003.xls +0 -0
- tsp/dataloggers/test_files/rbr_xl_001.DAT +1105 -1105
- tsp/dataloggers/test_files/rbr_xl_002.DAT +1126 -1126
- tsp/dataloggers/test_files/rbr_xl_003.DAT +4622 -4622
- tsp/dataloggers/test_files/rbr_xl_003.HEX +3587 -3587
- tsp/gtnp.py +148 -141
- tsp/labels.py +3 -3
- tsp/misc.py +90 -90
- tsp/physics.py +101 -101
- tsp/plots/static.py +374 -305
- tsp/readers.py +548 -536
- tsp/scratch.py +6 -0
- tsp/time.py +45 -45
- tsp/tspwarnings.py +15 -0
- tsp/utils.py +101 -101
- tsp/version.py +1 -1
- {tsp-1.7.3.dist-info → tsp-1.7.7.dist-info}/LICENSE +674 -674
- {tsp-1.7.3.dist-info → tsp-1.7.7.dist-info}/METADATA +9 -5
- tsp-1.7.7.dist-info/RECORD +95 -0
- {tsp-1.7.3.dist-info → tsp-1.7.7.dist-info}/WHEEL +5 -5
- tsp-1.7.3.dist-info/RECORD +0 -89
- {tsp-1.7.3.dist-info → tsp-1.7.7.dist-info}/top_level.txt +0 -0
tsp/scratch.py
ADDED
tsp/time.py
CHANGED
@@ -1,46 +1,46 @@
-import re
-from datetime import datetime, tzinfo
-
-from typing import Union
-
-
-def get_utc_offset(offset: "Union[str,int]") -> int:
-    """Get the UTC offset in seconds from a string or integer"""
-
-    if isinstance(offset, str):
-        if offset.lower() == "utc" or (offset.lower() == "z"):
-            return 0
-
-        pattern = re.compile(r"([+-]?)(\d{2}):(\d{2})")
-        match = pattern.match(offset)
-
-        if not match:
-            raise ValueError("Offset must be a string in the format '+HH:MM' or '-HH:MM'")
-
-        sign = match.group(1)
-        hours = int(match.group(2))
-        minutes = int(match.group(3))
-        utc_offset = (hours*60 + minutes)*60
-        if sign == "-":
-            utc_offset *= -1
-
-    elif isinstance(offset, int):
-        utc_offset = offset
-
-    else:
-        raise ValueError("Offset must be a string in the format '+HH:MM' or '-HH:MM' or an integer in seconds")
-
-    return utc_offset
-
-
-def format_utc_offset(offset: tzinfo) -> str:
-    """Format a UTC offset as a string in the format '+HH:MM' or '-HH:MM'"""
-    utc_offset = offset.utcoffset(datetime.now()).total_seconds()
-    sign = "-" if utc_offset < 0 else "+"
-    hours = int(abs(utc_offset)//3600)
-    minutes = int(abs(utc_offset)%3600/60)
-
-    if hours == 0 and minutes == 0:
-        return "UTC"
-
+import re
+from datetime import datetime, tzinfo
+
+from typing import Union
+
+
+def get_utc_offset(offset: "Union[str,int]") -> int:
+    """Get the UTC offset in seconds from a string or integer"""
+
+    if isinstance(offset, str):
+        if offset.lower() == "utc" or (offset.lower() == "z"):
+            return 0
+
+        pattern = re.compile(r"([+-]?)(\d{2}):(\d{2})")
+        match = pattern.match(offset)
+
+        if not match:
+            raise ValueError("Offset must be a string in the format '+HH:MM' or '-HH:MM'")
+
+        sign = match.group(1)
+        hours = int(match.group(2))
+        minutes = int(match.group(3))
+        utc_offset = (hours*60 + minutes)*60
+        if sign == "-":
+            utc_offset *= -1
+
+    elif isinstance(offset, int):
+        utc_offset = offset
+
+    else:
+        raise ValueError("Offset must be a string in the format '+HH:MM' or '-HH:MM' or an integer in seconds")
+
+    return utc_offset
+
+
+def format_utc_offset(offset: tzinfo) -> str:
+    """Format a UTC offset as a string in the format '+HH:MM' or '-HH:MM'"""
+    utc_offset = offset.utcoffset(datetime.now()).total_seconds()
+    sign = "-" if utc_offset < 0 else "+"
+    hours = int(abs(utc_offset)//3600)
+    minutes = int(abs(utc_offset)%3600/60)
+
+    if hours == 0 and minutes == 0:
+        return "UTC"
+
     return f"{sign}{hours:02d}:{minutes:02d}"
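Below is a minimal usage sketch for the two helpers in this file, assuming they are importable as tsp.time; the example itself is not part of the package.

from datetime import timedelta, timezone

from tsp.time import format_utc_offset, get_utc_offset

# get_utc_offset() parses "UTC"/"Z", "+HH:MM"/"-HH:MM" strings, or plain
# integers, and returns the offset in seconds.
assert get_utc_offset("UTC") == 0
assert get_utc_offset("-07:00") == -7 * 3600
assert get_utc_offset(3600) == 3600

# format_utc_offset() goes the other way, from a tzinfo to a "+HH:MM"
# string (a zero offset is rendered as "UTC").
assert format_utc_offset(timezone(timedelta(hours=-7))) == "-07:00"
assert format_utc_offset(timezone.utc) == "UTC"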
tsp/tspwarnings.py
ADDED
@@ -0,0 +1,15 @@
+import numpy as np
+
+
+class DuplicateTimesWarning(UserWarning):
+    """For when duplicate times are found in a file."""
+    def __init__(self, times):
+        self.times = times
+
+    def _msg(self, times) -> str:
+        m = f"Duplicate timestamps found: {times[np.where(times.duplicated())[0]]}. That's bad."
+        return m
+
+    def __str__(self):
+        return self._msg(self.times)
+
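The new warning class stores the offending index and builds its message lazily in __str__. Here is a hypothetical sketch of raising it; the pd.DatetimeIndex input and the warnings.warn call are illustration only, not code from the package.

import warnings

import pandas as pd

from tsp.tspwarnings import DuplicateTimesWarning

# An index with one repeated timestamp; anything with a .duplicated()
# method (e.g. a pandas Index) works with the warning's message builder.
times = pd.DatetimeIndex(["2023-01-01 00:00", "2023-01-01 00:00", "2023-01-01 01:00"])

# Warn only when the index actually contains repeated timestamps.
if times.duplicated().any():
    warnings.warn(DuplicateTimesWarning(times))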
tsp/utils.py
CHANGED
@@ -1,101 +1,101 @@
-import pandas as pd
-import numpy as np
-
-import tsp
-from tsp import TSP
-
-
-def resolve_duplicate_times(t: TSP, keep="first") -> TSP:
-    """Eliminate duplicate times in a TSP.
-
-    Parameters
-    ----------
-    tsp : TSP
-        TSP to resolve duplicate times in.
-    keep : str, optional
-        Method to resolve duplicate times. Chosen from "first", "average", "last", "strip"
-        by default "first"
-
-    Returns
-    -------
-    TSP
-        TSP with no duplicated times."""
-    resolver = _get_duplicate_resolver(keep)
-    return resolver(t)
-
-
-def _get_duplicate_resolver(keep: str):
-    if keep == "first":
-        return _first_duplicate_time
-    elif keep == "average":
-        return _average_duplicate_time
-    elif keep == "last":
-        return _last_duplicate_time
-    elif keep == "strip":
-        return _strip_duplicate_time
-    else:
-        raise ValueError(f"Unknown duplicate resolver method: {keep}")
-
-
-def _first_duplicate_time(t: TSP):
-    df = t.wide
-    df = df[~df.index.duplicated(keep="first")]
-
-    time = df.index
-    values = df.drop(['time'], axis=1).values
-    depths = df.drop(['time'], axis=1).columns
-
-    t_new = TSP(times=time, values=values, depths=depths,
-                latitude=t.latitude, longitude=t.longitude,
-                site_id=t.site_id, metadata=t.metadata)
-
-    return t_new
-
-
-def _last_duplicate_time(t: TSP):
-    df = t.wide
-    df = df[~df.index.duplicated(keep="last")]
-
-    time = df.index
-    values = df.drop(['time'], axis=1).values
-    depths = df.drop(['time'], axis=1).columns
-
-    t_new = TSP(times=time, values=values, depths=depths,
-                latitude=t.latitude, longitude=t.longitude,
-                site_id=t.site_id, metadata=t.metadata)
-
-    return t_new
-
-
-def _strip_duplicate_time(t: TSP):
-    df = t.wide
-    df = df[~df.index.duplicated(keep=False)]
-
-    time = df.index
-    values = df.drop(['time'], axis=1).values
-    depths = df.drop(['time'], axis=1).columns
-
-    t_new = TSP(times=time, values=values, depths=depths,
-                latitude=t.latitude, longitude=t.longitude,
-                site_id=t.site_id, metadata=t.metadata)
-
-    return t_new
-
-
-def _average_duplicate_time(t: TSP):
-    singleton = t.wide[~t.wide.index.duplicated(keep=False)]
-    duplicated = t.wide[t.wide.index.duplicated(keep=False)].drop(['time'], axis=1).reset_index()
-    averaged = duplicated.groupby(duplicated['index']).apply(lambda x: x[~x.isna()].mean(numeric_only=True))
-    averaged.insert(0, 'time',averaged.index)
-
-    df = pd.concat([singleton, averaged], ignore_index=False).sort_index()
-
-    time = df.index
-    values = df.drop(['time'], axis=1).values
-    depths = df.drop(['time'], axis=1).columns
-
-    t_new = TSP(times=time, values=values, depths=depths,
-                latitude=t.latitude, longitude=t.longitude,
-                site_id=t.site_id, metadata=t.metadata)
-
-    return t_new
+import pandas as pd
+import numpy as np
+
+import tsp
+from tsp import TSP
+
+
+def resolve_duplicate_times(t: TSP, keep="first") -> TSP:
+    """Eliminate duplicate times in a TSP.
+
+    Parameters
+    ----------
+    tsp : TSP
+        TSP to resolve duplicate times in.
+    keep : str, optional
+        Method to resolve duplicate times. Chosen from "first", "average", "last", "strip"
+        by default "first"
+
+    Returns
+    -------
+    TSP
+        TSP with no duplicated times."""
+    resolver = _get_duplicate_resolver(keep)
+    return resolver(t)
+
+
+def _get_duplicate_resolver(keep: str):
+    if keep == "first":
+        return _first_duplicate_time
+    elif keep == "average":
+        return _average_duplicate_time
+    elif keep == "last":
+        return _last_duplicate_time
+    elif keep == "strip":
+        return _strip_duplicate_time
+    else:
+        raise ValueError(f"Unknown duplicate resolver method: {keep}")
+
+
+def _first_duplicate_time(t: TSP):
+    df = t.wide
+    df = df[~df.index.duplicated(keep="first")]
+
+    time = df.index
+    values = df.drop(['time'], axis=1).values
+    depths = df.drop(['time'], axis=1).columns
+
+    t_new = TSP(times=time, values=values, depths=depths,
+                latitude=t.latitude, longitude=t.longitude,
+                site_id=t.site_id, metadata=t.metadata)
+
+    return t_new
+
+
+def _last_duplicate_time(t: TSP):
+    df = t.wide
+    df = df[~df.index.duplicated(keep="last")]
+
+    time = df.index
+    values = df.drop(['time'], axis=1).values
+    depths = df.drop(['time'], axis=1).columns
+
+    t_new = TSP(times=time, values=values, depths=depths,
+                latitude=t.latitude, longitude=t.longitude,
+                site_id=t.site_id, metadata=t.metadata)
+
+    return t_new
+
+
+def _strip_duplicate_time(t: TSP):
+    df = t.wide
+    df = df[~df.index.duplicated(keep=False)]
+
+    time = df.index
+    values = df.drop(['time'], axis=1).values
+    depths = df.drop(['time'], axis=1).columns
+
+    t_new = TSP(times=time, values=values, depths=depths,
+                latitude=t.latitude, longitude=t.longitude,
+                site_id=t.site_id, metadata=t.metadata)
+
+    return t_new
+
+
+def _average_duplicate_time(t: TSP):
+    singleton = t.wide[~t.wide.index.duplicated(keep=False)]
+    duplicated = t.wide[t.wide.index.duplicated(keep=False)].drop(['time'], axis=1).reset_index()
+    averaged = duplicated.groupby(duplicated['index']).apply(lambda x: x[~x.isna()].mean(numeric_only=True))
+    averaged.insert(0, 'time',averaged.index)
+
+    df = pd.concat([singleton, averaged], ignore_index=False).sort_index()
+
+    time = df.index
+    values = df.drop(['time'], axis=1).values
+    depths = df.drop(['time'], axis=1).columns
+
+    t_new = TSP(times=time, values=values, depths=depths,
+                latitude=t.latitude, longitude=t.longitude,
+                site_id=t.site_id, metadata=t.metadata)
+
+    return t_new
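A short usage sketch for the resolver follows; the TSP constructor keywords mirror the calls visible in this diff, but the toy data and import paths are assumptions, not code from the package.

import numpy as np
import pandas as pd

from tsp import TSP
from tsp.utils import resolve_duplicate_times

# Two readings share the same timestamp at a single depth (1.0 m).
times = pd.DatetimeIndex(["2023-01-01", "2023-01-01", "2023-01-02"])
t = TSP(times=times, values=np.array([[0.1], [0.3], [0.2]]), depths=[1.0])

# keep="average" collapses each run of duplicated timestamps into its mean;
# "first", "last", and "strip" keep one row or drop the duplicates entirely.
clean = resolve_duplicate_times(t, keep="average")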
tsp/version.py
CHANGED
@@ -1 +1 @@
-version="1.7.3"
+version="1.7.7"