tsp 1.8.1__py3-none-any.whl → 1.10.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (127)
  1. tsp/__init__.py +11 -11
  2. tsp/__meta__.py +1 -1
  3. tsp/concatenation.py +159 -153
  4. tsp/core.py +1306 -1162
  5. tsp/data/2023-01-06_755-test-Dataset_2031-Constant_Over_Interval-Hourly-Ground_Temperature-Thermistor_Automated.timeserie.csv +4 -4
  6. tsp/data/2023-01-06_755-test.metadata.txt +208 -208
  7. tsp/data/NTGS_example_csv.csv +6 -6
  8. tsp/data/NTGS_example_slash_dates.csv +6 -6
  9. tsp/data/NTGS_gtr_example_excel.xlsx +0 -0
  10. tsp/data/example_geotop.csv +5240 -5240
  11. tsp/data/example_gtnp.csv +1298 -1298
  12. tsp/data/example_permos.csv +7 -7
  13. tsp/data/ntgs-db-multi.txt +3872 -0
  14. tsp/data/ntgs-db-single.txt +2251 -0
  15. tsp/data/test_geotop_has_space.txt +5 -5
  16. tsp/data/tsp_format_long.csv +10 -0
  17. tsp/data/tsp_format_wide_1.csv +7 -0
  18. tsp/data/tsp_format_wide_2.csv +7 -0
  19. tsp/dataloggers/AbstractReader.py +43 -43
  20. tsp/dataloggers/FG2.py +110 -110
  21. tsp/dataloggers/GP5W.py +114 -114
  22. tsp/dataloggers/Geoprecision.py +34 -34
  23. tsp/dataloggers/HOBO.py +930 -914
  24. tsp/dataloggers/RBRXL800.py +190 -190
  25. tsp/dataloggers/RBRXR420.py +371 -308
  26. tsp/dataloggers/Vemco.py +84 -0
  27. tsp/dataloggers/__init__.py +15 -15
  28. tsp/dataloggers/logr.py +196 -115
  29. tsp/dataloggers/test_files/004448.DAT +2543 -2543
  30. tsp/dataloggers/test_files/004531.DAT +17106 -17106
  31. tsp/dataloggers/test_files/004531.HEX +3587 -3587
  32. tsp/dataloggers/test_files/004534.HEX +3587 -3587
  33. tsp/dataloggers/test_files/010252.dat +1731 -1731
  34. tsp/dataloggers/test_files/010252.hex +1739 -1739
  35. tsp/dataloggers/test_files/010274.hex +1291 -1291
  36. tsp/dataloggers/test_files/010278.hex +3544 -3544
  37. tsp/dataloggers/test_files/012064.dat +1286 -1286
  38. tsp/dataloggers/test_files/012064.hex +1294 -1294
  39. tsp/dataloggers/test_files/012064_modified_start.hex +1294 -0
  40. tsp/dataloggers/test_files/012081.hex +3532 -3532
  41. tsp/dataloggers/test_files/013138_recovery_stamp.hex +1123 -0
  42. tsp/dataloggers/test_files/014037-2007.hex +95 -0
  43. tsp/dataloggers/test_files/019360_20160918_1146_SlumpIslandTopofHill.hex +11253 -0
  44. tsp/dataloggers/test_files/019360_20160918_1146_SlumpIslandTopofHill.xls +0 -0
  45. tsp/dataloggers/test_files/07B1592.DAT +1483 -1483
  46. tsp/dataloggers/test_files/07B1592.HEX +1806 -1806
  47. tsp/dataloggers/test_files/07B4450.DAT +2234 -2234
  48. tsp/dataloggers/test_files/07B4450.HEX +2559 -2559
  49. tsp/dataloggers/test_files/2022018_2025-09-18T22-16-16.txt +36 -0
  50. tsp/dataloggers/test_files/2022018_2025-09-18T22-16-16_raw.csv +2074 -0
  51. tsp/dataloggers/test_files/2022018_2025-09-18T22-16-16_temp.csv +2074 -0
  52. tsp/dataloggers/test_files/2025004_2025-12-02T17-07-28_cfg.txt +30 -0
  53. tsp/dataloggers/test_files/2025004_2025-12-02T17-07-28_raw.csv +35 -0
  54. tsp/dataloggers/test_files/2025004_2025-12-02T17-07-28_temp.csv +35 -0
  55. tsp/dataloggers/test_files/204087.xlsx +0 -0
  56. tsp/dataloggers/test_files/Asc-1455As02.000 +2982 -0
  57. tsp/dataloggers/test_files/Asc-1456As02.000 +2992 -0
  58. tsp/dataloggers/test_files/Asc-1457As02.000 +2917 -0
  59. tsp/dataloggers/test_files/BGC_BH15_019362_20140610_1253.hex +1729 -0
  60. tsp/dataloggers/test_files/Bin2944.csv +759 -0
  61. tsp/dataloggers/test_files/Bin5494.csv +2972 -0
  62. tsp/dataloggers/test_files/Bin6786.csv +272 -0
  63. tsp/dataloggers/test_files/FG2_399.csv +9881 -9881
  64. tsp/dataloggers/test_files/GP5W.csv +1121 -1121
  65. tsp/dataloggers/test_files/GP5W_260.csv +1884 -1884
  66. tsp/dataloggers/test_files/GP5W_270.csv +2210 -2210
  67. tsp/dataloggers/test_files/H08-030-08_HOBOware.csv +998 -998
  68. tsp/dataloggers/test_files/Minilog-II-T_350763_20190711_1.csv +2075 -0
  69. tsp/dataloggers/test_files/Minilog-II-T_350769_20190921_1.csv +6384 -0
  70. tsp/dataloggers/test_files/Minilog-II-T_354284_20190921_1.csv +4712 -0
  71. tsp/dataloggers/test_files/Minilog-T_7943_20140920_1.csv +5826 -0
  72. tsp/dataloggers/test_files/Minilog-T_8979_20140806_1.csv +2954 -0
  73. tsp/dataloggers/test_files/Minilog-T_975_20110824_1.csv +4343 -0
  74. tsp/dataloggers/test_files/RBR_01.dat +1046 -1046
  75. tsp/dataloggers/test_files/RBR_02.dat +2426 -2426
  76. tsp/dataloggers/test_files/RI03b_062831_20240905_1801.rsk +0 -0
  77. tsp/dataloggers/test_files/RI03b_062831_20240905_1801.xlsx +0 -0
  78. tsp/dataloggers/test_files/RSTDT2055.csv +2152 -2152
  79. tsp/dataloggers/test_files/U23-001_HOBOware.csv +1001 -1001
  80. tsp/dataloggers/test_files/hobo-negative-2.txt +6396 -6396
  81. tsp/dataloggers/test_files/hobo-negative-3.txt +5593 -5593
  82. tsp/dataloggers/test_files/hobo-positive-number-1.txt +1000 -1000
  83. tsp/dataloggers/test_files/hobo-positive-number-2.csv +1003 -1003
  84. tsp/dataloggers/test_files/hobo-positive-number-3.csv +1133 -1133
  85. tsp/dataloggers/test_files/hobo-positive-number-4.csv +1209 -1209
  86. tsp/dataloggers/test_files/hobo2.csv +8702 -8702
  87. tsp/dataloggers/test_files/hobo_1_AB.csv +21732 -21732
  88. tsp/dataloggers/test_files/hobo_1_AB_Details.txt +133 -133
  89. tsp/dataloggers/test_files/hobo_1_AB_classic.csv +4373 -4373
  90. tsp/dataloggers/test_files/hobo_1_AB_defaults.csv +21732 -21732
  91. tsp/dataloggers/test_files/hobo_1_AB_minimal.txt +1358 -1358
  92. tsp/dataloggers/test_files/hobo_1_AB_var2.csv +3189 -3189
  93. tsp/dataloggers/test_files/hobo_1_AB_var3.csv +2458 -2458
  94. tsp/dataloggers/test_files/logR_ULogC16-32_1.csv +106 -106
  95. tsp/dataloggers/test_files/logR_ULogC16-32_2.csv +100 -100
  96. tsp/dataloggers/test_files/mon_3_Ta_2010-08-18_2013-02-08.txt +21724 -21724
  97. tsp/dataloggers/test_files/rbr_001.dat +1133 -1133
  98. tsp/dataloggers/test_files/rbr_001.hex +1139 -1139
  99. tsp/dataloggers/test_files/rbr_001_no_comment.dat +1132 -1132
  100. tsp/dataloggers/test_files/rbr_001_no_comment.hex +1138 -1138
  101. tsp/dataloggers/test_files/rbr_002.dat +1179 -1179
  102. tsp/dataloggers/test_files/rbr_002.hex +1185 -1185
  103. tsp/dataloggers/test_files/rbr_003.hex +1292 -1292
  104. tsp/dataloggers/test_files/rbr_xl_001.DAT +1105 -1105
  105. tsp/dataloggers/test_files/rbr_xl_002.DAT +1126 -1126
  106. tsp/dataloggers/test_files/rbr_xl_003.DAT +4622 -4622
  107. tsp/dataloggers/test_files/rbr_xl_003.HEX +3587 -3587
  108. tsp/gtnp.py +148 -148
  109. tsp/labels.py +3 -3
  110. tsp/misc.py +90 -90
  111. tsp/physics.py +101 -101
  112. tsp/plots/static.py +388 -374
  113. tsp/readers.py +829 -548
  114. tsp/standardization/__init__.py +0 -0
  115. tsp/standardization/metadata.py +95 -0
  116. tsp/standardization/metadata_ref.py +0 -0
  117. tsp/standardization/validator.py +535 -0
  118. tsp/time.py +45 -45
  119. tsp/tspwarnings.py +27 -15
  120. tsp/utils.py +131 -101
  121. tsp/version.py +1 -1
  122. {tsp-1.8.1.dist-info → tsp-1.10.2.dist-info}/METADATA +95 -86
  123. tsp-1.10.2.dist-info/RECORD +132 -0
  124. {tsp-1.8.1.dist-info → tsp-1.10.2.dist-info}/licenses/LICENSE +674 -674
  125. {tsp-1.8.1.dist-info → tsp-1.10.2.dist-info}/top_level.txt +1 -0
  126. tsp-1.8.1.dist-info/RECORD +0 -94
  127. {tsp-1.8.1.dist-info → tsp-1.10.2.dist-info}/WHEEL +0 -0
tsp/gtnp.py CHANGED
@@ -1,148 +1,148 @@
1
- from datetime import timezone, timedelta, tzinfo
2
- from typing import Optional
3
- from collections import OrderedDict
4
-
5
- import re
6
- import warnings
7
-
8
- from tsp.time import get_utc_offset
9
-
10
-
11
- class GtnpMetadata:
12
- def __init__(self, filepath):
13
- """A class to read GTN-P metadata files
14
-
15
- Parameters
16
- ----------
17
- filepath : str
18
- Path to GTN-P *.metadata.txt file.
19
- """
20
- self.filepath = filepath
21
- self._dict = OrderedDict()
22
- self._read()
23
- self._parse()
24
-
25
- def _read(self):
26
- try:
27
- with open(self.filepath, 'r') as f:
28
- self._raw = f.readlines()
29
-
30
- except UnicodeDecodeError:
31
- warnings.warn("Couldn't read file with utf-8 encoding. Metadata might be corrupted.")
32
- with open(self.filepath, 'r', errors='ignore') as f:
33
- self._raw = f.readlines()
34
-
35
- @property
36
- def raw(self) -> 'list[str]':
37
- return self._raw
38
-
39
- @raw.setter
40
- def raw(self, value):
41
- raise ValueError("Cannot set")
42
-
43
- @property
44
- def parsed(self) -> dict:
45
- return self._dict
46
-
47
- def _parse(self):
48
- lines = [line for line in self._raw] # Make a copy in case we need to use fallback plan
49
-
50
- try:
51
- self._dict = OrderedDict()
52
- recursively_build_metadata(lines, self._dict)
53
-
54
- except Exception:
55
- print("Couldn't build nested dictionary. Fallback to simple dictionary.")
56
- self._dict = OrderedDict()
57
- self._parse_dict()
58
-
59
-
60
- def _parse_dict(self) -> None:
61
- pattern = re.compile(r"^([^:]+):\s*(.*)$")
62
-
63
- for line in self._raw:
64
- result = pattern.match(line)
65
- if result:
66
- key, value = result.groups()
67
-
68
- if value.strip() != "":
69
- self._dict[key] = value.strip()
70
-
71
- def get_timezone(self) -> Optional[tzinfo]:
72
- try:
73
- zone = self._dict['Timezone']
74
- except KeyError:
75
- return None
76
-
77
- if zone == 'UTC':
78
- return timezone.utc
79
- elif isinstance(zone, str):
80
- seconds = get_utc_offset(zone.strip())
81
- tz = timezone(timedelta(seconds=seconds))
82
- return tz
83
-
84
- def get_latitude(self) -> Optional[float]:
85
- try:
86
- return float(self._dict['Latitude'])
87
- except KeyError:
88
- return None
89
-
90
- def get_longitude(self) -> Optional[float]:
91
- try:
92
- return float(self._dict['Longitude'])
93
- except KeyError:
94
- return None
95
-
96
-
97
- def recursively_build_metadata(lines: list, odict: OrderedDict, depth:int=0) -> None:
98
- """ A recursive function to build an OrderedDict from a list of lines.
99
-
100
- The function expects lines to be in the format:
101
- Key: Value
102
- Key: Value
103
- Key:
104
- Subkey: Multi line Subvalue
105
- Multi line Subvalue
106
- Multi line Subvalue
107
- Subkey: Subvalue
108
- Subkey:
109
- Subsubkey: Subsubvalue
110
-
111
- Parameters
112
- ----------
113
- lines : list
114
- A list of lines from a metadata file.
115
- odict : OrderedDict
116
- An OrderedDict to build.
117
- depth : int, optional
118
- The depth of the OrderedDict, by default 0
119
-
120
- """
121
- pattern = re.compile(r"^(\t*)([^:]+):\s*(.*)$")
122
-
123
- while lines:
124
- line = lines.pop(0)
125
- result = pattern.match(line)
126
-
127
- if result:
128
- tabs, key, value = result.groups()
129
-
130
- if len(tabs) < depth: # Un-indent, return to previous level
131
- lines.insert(0, line)
132
- return
133
-
134
- if value.strip() != "": # Valid key:value pair
135
- odict[key] = value.strip()
136
-
137
- else: # Empty value, recurse
138
- odict[key] = OrderedDict()
139
- recursively_build_metadata(lines, odict[key], depth=depth+1)
140
-
141
- else: # Multi-line value
142
- try:
143
- odict[next(reversed(odict))] = odict[next(reversed(odict))] + line
144
- except StopIteration: # If no key:value pair has been added yet
145
- continue
146
- except TypeError: # If the value is not a string
147
- continue
148
- continue
1
+ from datetime import timezone, timedelta, tzinfo
2
+ from typing import Optional
3
+ from collections import OrderedDict
4
+
5
+ import re
6
+ import warnings
7
+
8
+ from tsp.time import get_utc_offset
9
+
10
+
11
class GtnpMetadata:
    """Reader for GTN-P (Global Terrestrial Network for Permafrost) metadata files.

    Parses a ``*.metadata.txt`` file into a (possibly nested) OrderedDict and
    exposes convenience accessors for the timezone and coordinates.
    """

    def __init__(self, filepath):
        """A class to read GTN-P metadata files

        Parameters
        ----------
        filepath : str
            Path to GTN-P *.metadata.txt file.
        """
        self.filepath = filepath
        self._dict = OrderedDict()
        self._read()
        self._parse()

    def _read(self):
        """Read the raw file lines into ``self._raw``.

        Bug fix: the fallback warning claims a utf-8 decode failure, but the
        file was previously opened with the platform default encoding; open
        explicitly with utf-8 so the warning matches what actually happened.
        """
        try:
            with open(self.filepath, 'r', encoding='utf-8') as f:
                self._raw = f.readlines()

        except UnicodeDecodeError:
            warnings.warn("Couldn't read file with utf-8 encoding. Metadata might be corrupted.")
            with open(self.filepath, 'r', encoding='utf-8', errors='ignore') as f:
                self._raw = f.readlines()

    @property
    def raw(self) -> 'list[str]':
        """Raw lines of the metadata file (read-only)."""
        return self._raw

    @raw.setter
    def raw(self, value):
        raise ValueError("Cannot set")

    @property
    def parsed(self) -> dict:
        """Parsed metadata as a (possibly nested) OrderedDict."""
        return self._dict

    def _parse(self):
        """Build ``self._dict``; prefer the nested parser, fall back to flat."""
        lines = [line for line in self._raw]  # Make a copy in case we need to use fallback plan

        try:
            self._dict = OrderedDict()
            recursively_build_metadata(lines, self._dict)

        except Exception:
            print("Couldn't build nested dictionary. Fallback to simple dictionary.")
            self._dict = OrderedDict()
            self._parse_dict()

    def _parse_dict(self) -> None:
        """Flat fallback parser: one ``Key: Value`` pair per line, no nesting."""
        pattern = re.compile(r"^([^:]+):\s*(.*)$")

        for line in self._raw:
            result = pattern.match(line)
            if result:
                key, value = result.groups()

                # Keys with empty values are skipped (no nesting here).
                if value.strip() != "":
                    self._dict[key] = value.strip()

    def get_timezone(self) -> Optional[tzinfo]:
        """Return the file's timezone as a tzinfo, or None if absent.

        'UTC' maps directly to ``timezone.utc``; any other string is resolved
        through :func:`tsp.time.get_utc_offset`.  A non-string value (e.g. a
        nested dict) falls through and returns None.
        """
        try:
            zone = self._dict['Timezone']
        except KeyError:
            return None

        if zone == 'UTC':
            return timezone.utc
        elif isinstance(zone, str):
            seconds = get_utc_offset(zone.strip())
            tz = timezone(timedelta(seconds=seconds))
            return tz

    def get_latitude(self) -> Optional[float]:
        """Latitude in decimal degrees, or None if not present."""
        try:
            return float(self._dict['Latitude'])
        except KeyError:
            return None

    def get_longitude(self) -> Optional[float]:
        """Longitude in decimal degrees, or None if not present."""
        try:
            return float(self._dict['Longitude'])
        except KeyError:
            return None
95
+
96
+
97
def recursively_build_metadata(lines: list, odict: OrderedDict, depth: int = 0) -> None:
    """ A recursive function to build an OrderedDict from a list of lines.

    The function expects lines to be in the format:
        Key: Value
        Key: Value
        Key:
            Subkey: Multi line Subvalue
            Multi line Subvalue
            Multi line Subvalue
            Subkey: Subvalue
            Subkey:
                Subsubkey: Subsubvalue

    Parameters
    ----------
    lines : list
        A list of lines from a metadata file.  Consumed destructively.
    odict : OrderedDict
        An OrderedDict to build.
    depth : int, optional
        The depth of the OrderedDict, by default 0
    """
    key_pattern = re.compile(r"^(\t*)([^:]+):\s*(.*)$")

    while lines:
        current = lines.pop(0)
        match = key_pattern.match(current)

        if match is None:
            # Continuation line: append the raw text to the most recent value.
            try:
                last_key = next(reversed(odict))
                odict[last_key] = odict[last_key] + current
            except StopIteration:  # No key:value pair has been added yet
                pass
            except TypeError:      # Last value is not a string (nested dict)
                pass
            continue

        indent, key, value = match.groups()

        if len(indent) < depth:
            # Un-indent: push the line back and return to the previous level.
            lines.insert(0, current)
            return

        stripped = value.strip()
        if stripped:
            # Ordinary key: value pair at this level.
            odict[key] = stripped
        else:
            # Empty value introduces a nested section; recurse one level down.
            child = OrderedDict()
            odict[key] = child
            recursively_build_metadata(lines, child, depth=depth + 1)
tsp/labels.py CHANGED
@@ -1,4 +1,4 @@
1
- HOURLY = 60 * 60
2
- DAILY = HOURLY * 24
3
- MONTHLY = DAILY * 31
1
+ HOURLY = 60 * 60
2
+ DAILY = HOURLY * 24
3
+ MONTHLY = DAILY * 31
4
4
  YEARLY = DAILY * 365
tsp/misc.py CHANGED
@@ -1,90 +1,90 @@
1
- import numpy as np
2
- import pandas as pd
3
- import re
4
-
5
- import tsp.labels as lbl
6
-
7
-
8
- def _is_depth_column(col_name, pattern) -> bool:
9
- return bool(re.search(pattern, col_name))
10
-
11
-
12
- def completeness(df: pd.DataFrame, f1, f2) -> pd.DataFrame:
13
- """ Calculate completeness of an aggregated dataframe
14
- Parameters
15
- ----------
16
- df : pd.DataFrame
17
- Dataframe with temporal index and values equal to the number of observations
18
- in aggregation period
19
- f1 : str
20
- Aggregation period of data from which df is aggregated
21
- f2 : str
22
- Aggregation period of df
23
-
24
- Returns
25
- -------
26
- pd.DataFrame : Dataframe with completeness values as a decimal fraction [0,1]
27
- """
28
- # df must have temporal index
29
- C = None
30
- if f1 == lbl.HOURLY:
31
- if f2 == lbl.DAILY:
32
- C = df / 24
33
-
34
- elif f1 == lbl.DAILY:
35
- if f2 == lbl.MONTHLY:
36
- C = df / E_day_in_month(df)
37
- elif f2 == lbl.YEARLY:
38
- C = df / E_day_in_year(df)
39
-
40
- elif f1 == lbl.MONTHLY:
41
- if f2 == lbl.YEARLY:
42
- cnt = 12
43
-
44
- elif isinstance(f1, float) and isinstance(f1, float):
45
- R = f2 / f1
46
- C = df / R
47
-
48
- if C is None:
49
- raise ValueError(f"Unknown aggregation period {f1} or {f2}")
50
-
51
- return C
52
-
53
-
54
- def df_has_period(f, *args, **kwargs):
55
- df = args[0] if args[0] else kwargs.get('df')
56
- if not isinstance(df.index, pd.PeriodIndex):
57
- raise ValueError("Index must be a PeriodIndex")
58
- return f(*args, **kwargs)
59
-
60
-
61
- #@df_has_period
62
- def E_day_in_year(df: "pd.DataFrame") -> "pd.DataFrame":
63
- """ Expected number of daily observations per year """
64
- leap = df.index.to_period().is_leap_year
65
- days = np.atleast_2d(np.where(leap, 366, 365)).transpose()
66
- result = pd.DataFrame(index=df.index,
67
- columns=df.columns,
68
- data=np.repeat(np.atleast_2d(days), df.shape[1], axis=1))
69
- return result
70
-
71
-
72
- #@df_has_period
73
- def E_month_in_year(df: "pd.DataFrame") -> "pd.DataFrame":
74
- """ Expected number of monthly observations per year """
75
- result = pd.DataFrame(index=df.index,
76
- columns=df.columns,
77
- data=12)
78
- return result
79
-
80
-
81
- #@df_has_period
82
- def E_day_in_month(df: "pd.DataFrame") -> "pd.DataFrame":
83
- """ Expected number of daily observations per month """
84
- nday = df.index.to_period().days_in_month
85
- result = pd.DataFrame(index=df.index,
86
- columns=df.columns,
87
- data=np.repeat(np.atleast_2d(nday).transpose(), df.shape[1], axis=1))
88
- return result
89
-
90
-
1
+ import numpy as np
2
+ import pandas as pd
3
+ import re
4
+
5
+ import tsp.labels as lbl
6
+
7
+
8
+ def _is_depth_column(col_name, pattern) -> bool:
9
+ return bool(re.search(pattern, col_name))
10
+
11
+
12
def completeness(df: pd.DataFrame, f1, f2) -> pd.DataFrame:
    """ Calculate completeness of an aggregated dataframe

    Parameters
    ----------
    df : pd.DataFrame
        Dataframe with temporal index and values equal to the number of observations
        in aggregation period
    f1 : int or float
        Aggregation period of data from which df is aggregated
        (a tsp.labels constant, or a period length in seconds)
    f2 : int or float
        Aggregation period of df

    Returns
    -------
    pd.DataFrame : Dataframe with completeness values as a decimal fraction [0,1]

    Raises
    ------
    ValueError
        If the combination of f1 and f2 is not recognized.
    """
    # df must have temporal index
    C = None
    if f1 == lbl.HOURLY:
        if f2 == lbl.DAILY:
            C = df / 24

    elif f1 == lbl.DAILY:
        if f2 == lbl.MONTHLY:
            C = df / E_day_in_month(df)
        elif f2 == lbl.YEARLY:
            C = df / E_day_in_year(df)

    elif f1 == lbl.MONTHLY:
        if f2 == lbl.YEARLY:
            # Bug fix: previously assigned a dead local (cnt = 12) and fell
            # through to the ValueError below; divide by expected months/year.
            C = df / E_month_in_year(df)

    elif isinstance(f1, float) and isinstance(f2, float):
        # Bug fix: the second isinstance previously re-tested f1 instead of f2.
        R = f2 / f1
        C = df / R

    if C is None:
        raise ValueError(f"Unknown aggregation period {f1} or {f2}")

    return C
52
+
53
+
54
def df_has_period(f, *args, **kwargs):
    """Call *f* after checking that its dataframe argument has a PeriodIndex.

    The dataframe is taken from the first positional argument, or from the
    'df' keyword argument when no positional arguments are given.

    Raises
    ------
    ValueError
        If the dataframe's index is not a pd.PeriodIndex.
    """
    # Bug fix: the original evaluated `args[0] if args[0] else ...`, which
    # raises IndexError when args is empty and ValueError ("truth value of a
    # DataFrame is ambiguous") when args[0] is a DataFrame.  Test the tuple.
    df = args[0] if args else kwargs.get('df')
    if not isinstance(df.index, pd.PeriodIndex):
        raise ValueError("Index must be a PeriodIndex")
    return f(*args, **kwargs)
59
+
60
+
61
+ #@df_has_period
62
def E_day_in_year(df: "pd.DataFrame") -> "pd.DataFrame":
    """ Expected number of daily observations per year

    Every cell of the returned frame holds 365 or 366, depending on whether
    the corresponding row's period falls in a leap year.
    """
    is_leap = df.index.to_period().is_leap_year
    per_year = np.where(is_leap, 366, 365)
    # Broadcast the per-row count across every column of df.
    filled = np.tile(per_year[:, np.newaxis], (1, df.shape[1]))
    return pd.DataFrame(index=df.index, columns=df.columns, data=filled)
70
+
71
+
72
+ #@df_has_period
73
def E_month_in_year(df: "pd.DataFrame") -> "pd.DataFrame":
    """ Expected number of monthly observations per year

    Always 12 for every cell; shaped like *df*.
    """
    return pd.DataFrame(data=12, index=df.index, columns=df.columns)
79
+
80
+
81
+ #@df_has_period
82
def E_day_in_month(df: "pd.DataFrame") -> "pd.DataFrame":
    """ Expected number of daily observations per month

    Each row holds the number of days in the month that row's period falls in.
    """
    n_days = np.asarray(df.index.to_period().days_in_month)
    # Broadcast the per-row day count across every column of df.
    grid = np.tile(n_days[:, np.newaxis], (1, df.shape[1]))
    return pd.DataFrame(index=df.index, columns=df.columns, data=grid)
89
+
90
+