tsp 1.8.1__py3-none-any.whl → 1.10.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (127)
  1. tsp/__init__.py +11 -11
  2. tsp/__meta__.py +1 -1
  3. tsp/concatenation.py +159 -153
  4. tsp/core.py +1306 -1162
  5. tsp/data/2023-01-06_755-test-Dataset_2031-Constant_Over_Interval-Hourly-Ground_Temperature-Thermistor_Automated.timeserie.csv +4 -4
  6. tsp/data/2023-01-06_755-test.metadata.txt +208 -208
  7. tsp/data/NTGS_example_csv.csv +6 -6
  8. tsp/data/NTGS_example_slash_dates.csv +6 -6
  9. tsp/data/NTGS_gtr_example_excel.xlsx +0 -0
  10. tsp/data/example_geotop.csv +5240 -5240
  11. tsp/data/example_gtnp.csv +1298 -1298
  12. tsp/data/example_permos.csv +7 -7
  13. tsp/data/ntgs-db-multi.txt +3872 -0
  14. tsp/data/ntgs-db-single.txt +2251 -0
  15. tsp/data/test_geotop_has_space.txt +5 -5
  16. tsp/data/tsp_format_long.csv +10 -0
  17. tsp/data/tsp_format_wide_1.csv +7 -0
  18. tsp/data/tsp_format_wide_2.csv +7 -0
  19. tsp/dataloggers/AbstractReader.py +43 -43
  20. tsp/dataloggers/FG2.py +110 -110
  21. tsp/dataloggers/GP5W.py +114 -114
  22. tsp/dataloggers/Geoprecision.py +34 -34
  23. tsp/dataloggers/HOBO.py +930 -914
  24. tsp/dataloggers/RBRXL800.py +190 -190
  25. tsp/dataloggers/RBRXR420.py +371 -308
  26. tsp/dataloggers/Vemco.py +84 -0
  27. tsp/dataloggers/__init__.py +15 -15
  28. tsp/dataloggers/logr.py +196 -115
  29. tsp/dataloggers/test_files/004448.DAT +2543 -2543
  30. tsp/dataloggers/test_files/004531.DAT +17106 -17106
  31. tsp/dataloggers/test_files/004531.HEX +3587 -3587
  32. tsp/dataloggers/test_files/004534.HEX +3587 -3587
  33. tsp/dataloggers/test_files/010252.dat +1731 -1731
  34. tsp/dataloggers/test_files/010252.hex +1739 -1739
  35. tsp/dataloggers/test_files/010274.hex +1291 -1291
  36. tsp/dataloggers/test_files/010278.hex +3544 -3544
  37. tsp/dataloggers/test_files/012064.dat +1286 -1286
  38. tsp/dataloggers/test_files/012064.hex +1294 -1294
  39. tsp/dataloggers/test_files/012064_modified_start.hex +1294 -0
  40. tsp/dataloggers/test_files/012081.hex +3532 -3532
  41. tsp/dataloggers/test_files/013138_recovery_stamp.hex +1123 -0
  42. tsp/dataloggers/test_files/014037-2007.hex +95 -0
  43. tsp/dataloggers/test_files/019360_20160918_1146_SlumpIslandTopofHill.hex +11253 -0
  44. tsp/dataloggers/test_files/019360_20160918_1146_SlumpIslandTopofHill.xls +0 -0
  45. tsp/dataloggers/test_files/07B1592.DAT +1483 -1483
  46. tsp/dataloggers/test_files/07B1592.HEX +1806 -1806
  47. tsp/dataloggers/test_files/07B4450.DAT +2234 -2234
  48. tsp/dataloggers/test_files/07B4450.HEX +2559 -2559
  49. tsp/dataloggers/test_files/2022018_2025-09-18T22-16-16.txt +36 -0
  50. tsp/dataloggers/test_files/2022018_2025-09-18T22-16-16_raw.csv +2074 -0
  51. tsp/dataloggers/test_files/2022018_2025-09-18T22-16-16_temp.csv +2074 -0
  52. tsp/dataloggers/test_files/2025004_2025-12-02T17-07-28_cfg.txt +30 -0
  53. tsp/dataloggers/test_files/2025004_2025-12-02T17-07-28_raw.csv +35 -0
  54. tsp/dataloggers/test_files/2025004_2025-12-02T17-07-28_temp.csv +35 -0
  55. tsp/dataloggers/test_files/204087.xlsx +0 -0
  56. tsp/dataloggers/test_files/Asc-1455As02.000 +2982 -0
  57. tsp/dataloggers/test_files/Asc-1456As02.000 +2992 -0
  58. tsp/dataloggers/test_files/Asc-1457As02.000 +2917 -0
  59. tsp/dataloggers/test_files/BGC_BH15_019362_20140610_1253.hex +1729 -0
  60. tsp/dataloggers/test_files/Bin2944.csv +759 -0
  61. tsp/dataloggers/test_files/Bin5494.csv +2972 -0
  62. tsp/dataloggers/test_files/Bin6786.csv +272 -0
  63. tsp/dataloggers/test_files/FG2_399.csv +9881 -9881
  64. tsp/dataloggers/test_files/GP5W.csv +1121 -1121
  65. tsp/dataloggers/test_files/GP5W_260.csv +1884 -1884
  66. tsp/dataloggers/test_files/GP5W_270.csv +2210 -2210
  67. tsp/dataloggers/test_files/H08-030-08_HOBOware.csv +998 -998
  68. tsp/dataloggers/test_files/Minilog-II-T_350763_20190711_1.csv +2075 -0
  69. tsp/dataloggers/test_files/Minilog-II-T_350769_20190921_1.csv +6384 -0
  70. tsp/dataloggers/test_files/Minilog-II-T_354284_20190921_1.csv +4712 -0
  71. tsp/dataloggers/test_files/Minilog-T_7943_20140920_1.csv +5826 -0
  72. tsp/dataloggers/test_files/Minilog-T_8979_20140806_1.csv +2954 -0
  73. tsp/dataloggers/test_files/Minilog-T_975_20110824_1.csv +4343 -0
  74. tsp/dataloggers/test_files/RBR_01.dat +1046 -1046
  75. tsp/dataloggers/test_files/RBR_02.dat +2426 -2426
  76. tsp/dataloggers/test_files/RI03b_062831_20240905_1801.rsk +0 -0
  77. tsp/dataloggers/test_files/RI03b_062831_20240905_1801.xlsx +0 -0
  78. tsp/dataloggers/test_files/RSTDT2055.csv +2152 -2152
  79. tsp/dataloggers/test_files/U23-001_HOBOware.csv +1001 -1001
  80. tsp/dataloggers/test_files/hobo-negative-2.txt +6396 -6396
  81. tsp/dataloggers/test_files/hobo-negative-3.txt +5593 -5593
  82. tsp/dataloggers/test_files/hobo-positive-number-1.txt +1000 -1000
  83. tsp/dataloggers/test_files/hobo-positive-number-2.csv +1003 -1003
  84. tsp/dataloggers/test_files/hobo-positive-number-3.csv +1133 -1133
  85. tsp/dataloggers/test_files/hobo-positive-number-4.csv +1209 -1209
  86. tsp/dataloggers/test_files/hobo2.csv +8702 -8702
  87. tsp/dataloggers/test_files/hobo_1_AB.csv +21732 -21732
  88. tsp/dataloggers/test_files/hobo_1_AB_Details.txt +133 -133
  89. tsp/dataloggers/test_files/hobo_1_AB_classic.csv +4373 -4373
  90. tsp/dataloggers/test_files/hobo_1_AB_defaults.csv +21732 -21732
  91. tsp/dataloggers/test_files/hobo_1_AB_minimal.txt +1358 -1358
  92. tsp/dataloggers/test_files/hobo_1_AB_var2.csv +3189 -3189
  93. tsp/dataloggers/test_files/hobo_1_AB_var3.csv +2458 -2458
  94. tsp/dataloggers/test_files/logR_ULogC16-32_1.csv +106 -106
  95. tsp/dataloggers/test_files/logR_ULogC16-32_2.csv +100 -100
  96. tsp/dataloggers/test_files/mon_3_Ta_2010-08-18_2013-02-08.txt +21724 -21724
  97. tsp/dataloggers/test_files/rbr_001.dat +1133 -1133
  98. tsp/dataloggers/test_files/rbr_001.hex +1139 -1139
  99. tsp/dataloggers/test_files/rbr_001_no_comment.dat +1132 -1132
  100. tsp/dataloggers/test_files/rbr_001_no_comment.hex +1138 -1138
  101. tsp/dataloggers/test_files/rbr_002.dat +1179 -1179
  102. tsp/dataloggers/test_files/rbr_002.hex +1185 -1185
  103. tsp/dataloggers/test_files/rbr_003.hex +1292 -1292
  104. tsp/dataloggers/test_files/rbr_xl_001.DAT +1105 -1105
  105. tsp/dataloggers/test_files/rbr_xl_002.DAT +1126 -1126
  106. tsp/dataloggers/test_files/rbr_xl_003.DAT +4622 -4622
  107. tsp/dataloggers/test_files/rbr_xl_003.HEX +3587 -3587
  108. tsp/gtnp.py +148 -148
  109. tsp/labels.py +3 -3
  110. tsp/misc.py +90 -90
  111. tsp/physics.py +101 -101
  112. tsp/plots/static.py +388 -374
  113. tsp/readers.py +829 -548
  114. tsp/standardization/__init__.py +0 -0
  115. tsp/standardization/metadata.py +95 -0
  116. tsp/standardization/metadata_ref.py +0 -0
  117. tsp/standardization/validator.py +535 -0
  118. tsp/time.py +45 -45
  119. tsp/tspwarnings.py +27 -15
  120. tsp/utils.py +131 -101
  121. tsp/version.py +1 -1
  122. {tsp-1.8.1.dist-info → tsp-1.10.2.dist-info}/METADATA +95 -86
  123. tsp-1.10.2.dist-info/RECORD +132 -0
  124. {tsp-1.8.1.dist-info → tsp-1.10.2.dist-info}/licenses/LICENSE +674 -674
  125. {tsp-1.8.1.dist-info → tsp-1.10.2.dist-info}/top_level.txt +1 -0
  126. tsp-1.8.1.dist-info/RECORD +0 -94
  127. {tsp-1.8.1.dist-info → tsp-1.10.2.dist-info}/WHEEL +0 -0
tsp/__init__.py CHANGED
@@ -1,11 +1,11 @@
- from tsp.core import TSP, IndexedTSP
- from tsp.misc import _is_depth_column
-
- from tsp.plots.static import trumpet_curve, time_series, colour_contour
- from tsp.readers import read_gtnp, read_geotop, read_geoprecision, read_hoboware, read_ntgs, read_logr, read_csv, read_rbr
- from tsp.utils import resolve_duplicate_times
- from tsp.version import version as __version__
-
- #TSP.__module__ = "teaspoon"
-
- __all__ = ["TSP", "IndexedTSP"]
+ from tsp.core import TSP, IndexedTSP
+ from tsp.misc import _is_depth_column
+
+ from tsp.plots.static import trumpet_curve, time_series, colour_contour
+ from tsp.readers import read_gtnp, read_geotop, read_geoprecision, read_hoboware, read_ntgs, read_logr, read_csv, read_rbr
+ from tsp.utils import resolve_duplicate_times
+ from tsp.version import version as __version__
+
+ #TSP.__module__ = "teaspoon"
+
+ __all__ = ["TSP", "IndexedTSP"]
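
The removed and re-added lines above are textually identical, so this change amounts to whitespace or line-ending normalization; the package-level API is unchanged. As a hedged sketch (assuming tsp 1.10.2 is installed), these are the names the file re-exports, taken verbatim from the import lines:

    # Names exported at package level by tsp/__init__.py, per the diff above.
    import tsp

    print(tsp.__version__)   # version string re-exported from tsp.version
    print(tsp.__all__)       # ["TSP", "IndexedTSP"]

    # Also importable at package level via the re-exports shown above:
    from tsp import TSP, IndexedTSP
    from tsp import read_gtnp, read_geotop, read_geoprecision, read_hoboware
    from tsp import read_ntgs, read_logr, read_csv, read_rbr
    from tsp import trumpet_curve, time_series, colour_contour
    from tsp import resolve_duplicate_times
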
tsp/__meta__.py CHANGED
@@ -1,3 +1,3 @@
  # Automatically created. Please do not edit.
- __version__ = '1.8.1'
+ __version__ = '1.10.2'
  __author__ = 'Nick Brown'
tsp/concatenation.py CHANGED
@@ -1,153 +1,159 @@
- from __future__ import annotations
-
- import numpy as np
- import pandas as pd
-
- from typing import Any, TYPE_CHECKING
-
-
- if TYPE_CHECKING:
-     from tsp import TSP
-
-
- def _tsp_concat(tsp_list: "list[TSP]", on_conflict="error", metadata='first') -> dict[str, Any]:
-     """ Helper for core.tsp_concat """
-     # Validate the TSPs in the list
-     _validate_tsp_list(tsp_list)
-
-     # Combine the TSPs
-     dfs = [t.wide for t in tsp_list]
-     combined_df = _concat_deduplicate(dfs, on_conflict=on_conflict)
-
-     # Combine metadata
-     if metadata == 'first':
-         metadata = {key:val for key, val in tsp_list[0].metadata.items()}
-         latitude = tsp_list[0].latitude
-         longitude = tsp_list[0].longitude
-         site_id = tsp_list[0].site_id
-
-     elif metadata == 'identical':
-         metadata = {key:val for key, val in tsp_list[0].metadata.items()}
-         for key, val in metadata.items():
-             for t in tsp_list[1:]:
-                 if key not in t.metadata or t.metadata[key] != val:
-                     _ = metadata.pop(key)
-         latitude = _none_if_not_identical([t.latitude for t in tsp_list])
-         longitude = _none_if_not_identical([t.longitude for t in tsp_list])
-         site_id = _none_if_not_identical([t.site_id for t in tsp_list])
-
-     elif metadata == 'none':
-         metadata = None
-         latitude, longitude, site_id = None, None, None
-
-     else:
-         raise ValueError(f"Unknown metadata method: {metadata}")
-
-     #final_tsp = TSP(times=combined_df.index, values=combined_df.values, depths=combined_df.columns,
-     #                latitude=latitude, longitude=longitude,
-     #                site_id=site_id, metadata=metadata)
-     try:
-         combined_df.drop('time', axis=1, inplace=True)
-     except KeyError:
-         Warning("Deprecation Error: The 'time' column is no longer used in TSP objects. Please update your code to avoid this warning.")
-
-     tsp_dict = {
-         'times': combined_df.index,
-         'values': combined_df.values,
-         'depths': combined_df.columns,
-         'latitude': latitude,
-         'longitude': longitude,
-         'site_id': site_id,
-         'metadata': metadata
-     }
-     return tsp_dict
-
-
- def _none_if_not_identical(list):
-     """Check if all elements in the list are identical. If they are, return the first element; otherwise, return None."""
-     first = list[0]
-     for item in list[1:]:
-         if item != first:
-             return None
-     return first
-
-
- def _validate_tsp_list(tsp_list: "list[TSP]"):
-     """Check that all TSPs in the list have the same depths."""
-     depths0 = tsp_list[0].depths
-     for t in tsp_list[1:]:
-         if not np.array_equal(depths0, t.depths):
-             raise ValueError("All TSPs must have the same depths.")
-
-
- def _concat_deduplicate(df_list, on_conflict='error'):
-     """
-     Concatenates a list of DataFrames, handling duplicate indices based on row values.
-
-     Args:
-         df_list (list): A list of pandas DataFrames. Assumes they have identical
-             column names.
-         on_conflict (str): Specifies how to handle duplicate indices with
-             unequal row values.
-             - 'error': Raise a ValueError (default).
-             - 'keep_first': Keep the row corresponding to the first
-               DataFrame in the list where the index appeared.
-
-     Returns:
-         pandas.DataFrame: The concatenated DataFrame with duplicates handled
-         according to the specified rules.
-
-     Raises:
-         ValueError: If df_list is empty.
-         ValueError: If on_conflict is not 'error' or 'keep_first'.
-         ValueError: If on_conflict='error' and duplicate indices with
-             non-identical row values are found.
-     """
-     if not df_list:
-         raise ValueError("Input DataFrame list cannot be empty.")
-
-     if on_conflict not in ['error', 'keep_first']:
-         raise ValueError("on_conflict must be either 'error' or 'keep_first'")
-
-     # Store original index name if it exists
-     original_index_name = df_list[0].index.name
-
-     # Concatenate all DataFrames. The order is preserved.
-     combined_df = pd.concat(df_list, ignore_index=False) # Keep original indices
-
-     temp_index_col = "__temp_index__"
-     combined_reset = combined_df.reset_index(names=temp_index_col)
-
-     # Drop rows that are duplicates based on *all* columns
-     deduplicated_reset = combined_reset.drop_duplicates(keep='first')
-
-     # Check for remaining duplicates *only* in the original index column.
-     remaining_duplicates_mask = deduplicated_reset.duplicated(subset=temp_index_col, keep=False)
-
-     if remaining_duplicates_mask.any():
-         # We have indices that appeared multiple times with different values.
-         if on_conflict == 'error':
-             conflicting_indices = deduplicated_reset.loc[remaining_duplicates_mask, temp_index_col].unique()
-             raise ValueError(
-                 f"Duplicate indices with non-identical values found: "
-                 f"{list(conflicting_indices)}. Use on_conflict='keep_first' to keep "
-                 f"the first occurrence."
-             )
-         elif on_conflict == 'keep_first':
-             # Drop the later occurrences of these conflicting index values.
-             # Since 'deduplicated_reset' preserved the first unique (index, row_value)
-             # combination, dropping duplicates based solely on the index column
-             # while keeping the first achieves the desired outcome.
-             final_reset = deduplicated_reset.drop_duplicates(subset=temp_index_col, keep='first')
-         else:
-             pass
-     else:
-         # No conflicting duplicates (duplicate indices with different values) were found.
-         final_reset = deduplicated_reset
-
-     final_df = final_reset.set_index(temp_index_col)
-     final_df.index.name = original_index_name
-     # Sort by time, ascending
-     final_df.sort_index(inplace=True)
-
-     return final_df
+ from __future__ import annotations
+
+ import numpy as np
+ import pandas as pd
+
+ from typing import Any, TYPE_CHECKING
+
+
+ if TYPE_CHECKING:
+     from tsp import TSP
+
+
+ def _tsp_concat(tsp_list: "list[TSP]", on_conflict="error", metadata='first') -> dict[str, Any]:
+     """ Helper for core.tsp_concat """
+     # Validate the TSPs in the list
+     _validate_tsp_list(tsp_list)
+
+     # Combine the TSPs
+     dfs = [t.wide for t in tsp_list]
+     combined_df = _concat_deduplicate(dfs, on_conflict=on_conflict)
+     combined_counts = _concat_deduplicate([pd.DataFrame(t.counts, columns=t.depths, index=t.wide.index) for t in tsp_list], on_conflict=on_conflict).values
+
+     # Combine metadata
+     if metadata == 'first':
+         metadata = {key:val for key, val in tsp_list[0].metadata.items()}
+         latitude = tsp_list[0].latitude
+         longitude = tsp_list[0].longitude
+         site_id = tsp_list[0].site_id
+
+     elif metadata == 'identical':
+         metadata = {key:val for key, val in tsp_list[0].metadata.items()}
+         for key, val in metadata.items():
+             for t in tsp_list[1:]:
+                 if key not in t.metadata or t.metadata[key] != val:
+                     _ = metadata.pop(key)
+         latitude = _none_if_not_identical([t.latitude for t in tsp_list])
+         longitude = _none_if_not_identical([t.longitude for t in tsp_list])
+         site_id = _none_if_not_identical([t.site_id for t in tsp_list])
+
+     elif metadata == 'none':
+         metadata = None
+         latitude, longitude, site_id = None, None, None
+
+     else:
+         raise ValueError(f"Unknown metadata method: {metadata}")
+
+     #final_tsp = TSP(times=combined_df.index, values=combined_df.values, depths=combined_df.columns,
+     #                latitude=latitude, longitude=longitude,
+     #                site_id=site_id, metadata=metadata)
+     try:
+         combined_df.drop('time', axis=1, inplace=True)
+     except KeyError:
+         Warning("Deprecation Error: The 'time' column is no longer used in TSP objects. Please update your code to avoid this warning.")
+
+     tsp_dict = {
+         'times': combined_df.index,
+         'values': combined_df.values,
+         'depths': combined_df.columns,
+         'latitude': latitude,
+         'longitude': longitude,
+         'site_id': site_id,
+         'metadata': metadata,
+         'counts': combined_counts
+     }
+     return tsp_dict
+
+
+ def _none_if_not_identical(list):
+     """Check if all elements in the list are identical. If they are, return the first element; otherwise, return None."""
+     first = list[0]
+     for item in list[1:]:
+         if item != first:
+             return None
+     return first
+
+
+ def _validate_tsp_list(tsp_list: "list[TSP]"):
+     """Check that all TSPs in the list have the same depths."""
+     depths0 = tsp_list[0].depths
+     for t in tsp_list[1:]:
+         if not np.array_equal(depths0, t.depths):
+             raise ValueError("All TSPs must have the same depths.")
+
+
+ def _concat_deduplicate(df_list, on_conflict='error'):
+     """
+     Concatenates a list of DataFrames, handling duplicate indices based on row values.
+
+     Args:
+         df_list (list): A list of pandas DataFrames. Assumes they have identical
+             column names.
+         on_conflict (str): Specifies how to handle duplicate indices with
+             unequal row values.
+             - 'error': Raise a ValueError (default).
+             - 'keep_first': Keep the row corresponding to the first
+               DataFrame in the list where the index appeared.
+             - 'keep_last': Keep the row corresponding to the last
+               DataFrame in the list where the index appeared.
+
+     Returns:
+         pandas.DataFrame: The concatenated DataFrame with duplicates handled
+         according to the specified rules.
+
+     Raises:
+         ValueError: If df_list is empty.
+         ValueError: If on_conflict is not 'error', 'keep_first', or 'keep_last'.
+         ValueError: If on_conflict='error' and duplicate indices with
+             non-identical row values are found.
+     """
+     if not df_list:
+         raise ValueError("Input DataFrame list cannot be empty.")
+
+     if on_conflict not in ['error', 'keep_first', 'keep_last']:
+         raise ValueError("on_conflict must be either 'error', 'keep_first', or 'keep_last'")
+
+     # Store original index name if it exists
+     original_index_name = df_list[0].index.name
+
+     # Concatenate all DataFrames. The order is preserved.
+     combined_df = pd.concat(df_list, ignore_index=False) # Keep original indices
+
+     temp_index_col = "__temp_index__"
+     combined_reset = combined_df.reset_index(names=temp_index_col)
+
+     # Drop rows that are duplicates based on *all* columns
+     deduplicated_reset = combined_reset.drop_duplicates(keep='first')
+
+     # Check for remaining duplicates *only* in the original index column.
+     remaining_duplicates_mask = deduplicated_reset.duplicated(subset=temp_index_col, keep=False)
+
+     if remaining_duplicates_mask.any():
+         # We have indices that appeared multiple times with different values.
+         if on_conflict == 'error':
+             conflicting_indices = deduplicated_reset.loc[remaining_duplicates_mask, temp_index_col].unique()
+             raise ValueError(
+                 f"Duplicate indices with non-identical values found: "
+                 f"{list(conflicting_indices)}. Use on_conflict='keep_first' to keep "
+                 f"the first occurrence."
+             )
+         elif on_conflict == 'keep_first':
+             # Drop the later occurrences of these conflicting index values.
+             # Since 'deduplicated_reset' preserved the first unique (index, row_value)
+             # combination, dropping duplicates based solely on the index column
+             # while keeping the first achieves the desired outcome.
+             final_reset = deduplicated_reset.drop_duplicates(subset=temp_index_col, keep='first')
+         elif on_conflict == 'keep_last':
+             final_reset = deduplicated_reset.drop_duplicates(subset=temp_index_col, keep='last')
+         else:
+             pass
+     else:
+         # No conflicting duplicates (duplicate indices with different values) were found.
+         final_reset = deduplicated_reset
+
+     final_df = final_reset.set_index(temp_index_col)
+     final_df.index.name = original_index_name
+     # Sort by time, ascending
+     final_df.sort_index(inplace=True)
+
+     return final_df
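
The substantive changes in this file are that _tsp_concat now merges per-depth observation counts through the same deduplication path (returned under the new 'counts' key), and _concat_deduplicate gains an on_conflict='keep_last' mode. A minimal sketch of the new mode, calling the private helper directly on toy data (timestamps, depths, and values are illustrative only):

    import pandas as pd
    from tsp.concatenation import _concat_deduplicate  # private helper shown above

    # Two overlapping logger downloads for a single depth (column 1.0)
    a = pd.DataFrame({1.0: [0.5, 0.6]},
                     index=pd.to_datetime(["2023-01-01", "2023-01-02"]))
    b = pd.DataFrame({1.0: [0.9, 0.8]},
                     index=pd.to_datetime(["2023-01-02", "2023-01-03"]))

    # 2023-01-02 appears in both frames with different values (0.6 vs 0.9):
    #   on_conflict='error'      -> raises ValueError (the default)
    #   on_conflict='keep_first' -> keeps 0.6 (the row from a)
    #   on_conflict='keep_last'  -> keeps 0.9 (the row from b), new in 1.10.2
    merged = _concat_deduplicate([a, b], on_conflict='keep_last')
    print(merged)   # three rows sorted by time: 0.5, 0.9, 0.8
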