tobac-1.6.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. tobac/__init__.py +112 -0
  2. tobac/analysis/__init__.py +31 -0
  3. tobac/analysis/cell_analysis.py +628 -0
  4. tobac/analysis/feature_analysis.py +212 -0
  5. tobac/analysis/spatial.py +619 -0
  6. tobac/centerofgravity.py +226 -0
  7. tobac/feature_detection.py +1758 -0
  8. tobac/merge_split.py +324 -0
  9. tobac/plotting.py +2321 -0
  10. tobac/segmentation/__init__.py +10 -0
  11. tobac/segmentation/watershed_segmentation.py +1316 -0
  12. tobac/testing.py +1179 -0
  13. tobac/tests/segmentation_tests/test_iris_xarray_segmentation.py +0 -0
  14. tobac/tests/segmentation_tests/test_segmentation.py +1183 -0
  15. tobac/tests/segmentation_tests/test_segmentation_time_pad.py +104 -0
  16. tobac/tests/test_analysis_spatial.py +1109 -0
  17. tobac/tests/test_convert.py +265 -0
  18. tobac/tests/test_datetime.py +216 -0
  19. tobac/tests/test_decorators.py +148 -0
  20. tobac/tests/test_feature_detection.py +1321 -0
  21. tobac/tests/test_generators.py +273 -0
  22. tobac/tests/test_import.py +24 -0
  23. tobac/tests/test_iris_xarray_match_utils.py +244 -0
  24. tobac/tests/test_merge_split.py +351 -0
  25. tobac/tests/test_pbc_utils.py +497 -0
  26. tobac/tests/test_sample_data.py +197 -0
  27. tobac/tests/test_testing.py +747 -0
  28. tobac/tests/test_tracking.py +714 -0
  29. tobac/tests/test_utils.py +650 -0
  30. tobac/tests/test_utils_bulk_statistics.py +789 -0
  31. tobac/tests/test_utils_coordinates.py +328 -0
  32. tobac/tests/test_utils_internal.py +97 -0
  33. tobac/tests/test_xarray_utils.py +232 -0
  34. tobac/tracking.py +613 -0
  35. tobac/utils/__init__.py +27 -0
  36. tobac/utils/bulk_statistics.py +360 -0
  37. tobac/utils/datetime.py +184 -0
  38. tobac/utils/decorators.py +540 -0
  39. tobac/utils/general.py +753 -0
  40. tobac/utils/generators.py +87 -0
  41. tobac/utils/internal/__init__.py +2 -0
  42. tobac/utils/internal/coordinates.py +430 -0
  43. tobac/utils/internal/iris_utils.py +462 -0
  44. tobac/utils/internal/label_props.py +82 -0
  45. tobac/utils/internal/xarray_utils.py +439 -0
  46. tobac/utils/mask.py +364 -0
  47. tobac/utils/periodic_boundaries.py +419 -0
  48. tobac/wrapper.py +244 -0
  49. tobac-1.6.2.dist-info/METADATA +154 -0
  50. tobac-1.6.2.dist-info/RECORD +53 -0
  51. tobac-1.6.2.dist-info/WHEEL +5 -0
  52. tobac-1.6.2.dist-info/licenses/LICENSE +29 -0
  53. tobac-1.6.2.dist-info/top_level.txt +1 -0
@@ -0,0 +1,360 @@
+"""
+Support functions to compute bulk statistics of features, either as a postprocessing step
+or within feature detection or segmentation.
+
+"""
+
+from __future__ import annotations
+from datetime import timedelta
+import logging
+import warnings
+from functools import partial
+from typing import Callable, Optional, Union
+
+import numpy as np
+
+from tobac.utils.generators import field_and_features_over_time
+
+# numpy renamed core to _core recently
+try:
+    from numpy._core import multiarray as mu
+except ModuleNotFoundError:
+    from numpy.core import multiarray as mu
+import pandas as pd
+import xarray as xr
+from tobac.utils import decorators
+
+
+def get_statistics(
+    features: pd.DataFrame,
+    labels: np.ndarray[int],
+    *fields: tuple[xr.DataArray],
+    statistic: dict[str, Union[Callable, tuple[Callable, dict]]] = {
+        "ncells": np.count_nonzero
+    },
+    index: Union[None, list[int]] = None,
+    default: Union[None, float] = None,
+    id_column: str = "feature",
+    collapse_axis: Union[None, int, list[int]] = None,
+) -> pd.DataFrame:
+    """Get bulk statistics for objects (e.g. features or segmented features)
+    given a labelled mask of the objects and any input field with the same
+    dimensions as labels, or that can be broadcast against labels according
+    to numpy broadcasting rules.
+
+    The statistics are added as new columns to the existing feature dataframe.
+    Users specify which statistics are computed by providing a dictionary
+    mapping output column names to the respective functions.
+
+    Parameters
+    ----------
+    features: pd.DataFrame
+        Dataframe with features or segmented features (output from feature
+        detection or segmentation), either for a specific timestep or for
+        the whole dataset.
+
+    labels : np.ndarray[int]
+        Mask with the label of each region to apply the functions to (e.g.
+        output of segmentation for a specific timestep).
+
+    *fields : tuple[xr.DataArray]
+        Fields to pass as arguments to each function call. If the shape does
+        not match that of labels, numpy-style broadcasting will be applied.
+
+    statistic: dict[str, Callable], optional (default: {'ncells': np.count_nonzero})
+        Dictionary with the function(s) to apply over each region as values
+        and the names of the respective statistics as keys. The default counts
+        the number of cells associated with each feature and writes it to the
+        feature dataframe.
+
+    index: None | list[int], optional (default: None)
+        List of indices of regions in labels to apply the functions to. If
+        None, defaults to all integer feature labels in labels.
+
+    default: None | float, optional (default: None)
+        Default value to return for a region that has no values.
+
+    id_column: str, optional (default: "feature")
+        Name of the column in the feature dataframe containing IDs that match
+        the labels in the mask. The default is the column "feature".
+
+    collapse_axis: None | int | list[int], optional (default: None)
+        Index or indices of axes of labels to collapse. This reduces the
+        dimensionality of labels while allowing labelled features to overlap,
+        and can be used, for example, to calculate the footprint area (2D) of
+        3D labels.
+
+    Returns
+    -------
+    features: pd.DataFrame
+        Updated feature dataframe with the bulk statistics for each feature
+        saved in new columns.
+    """
+
+    # if mask and input data dimensions do not match we can broadcast using numpy broadcasting rules
+    if collapse_axis is not None:
+        # Test if iterable and if not make a list
+        try:
+            collapse_axis = list(iter(collapse_axis))
+        except TypeError:
+            collapse_axis = [collapse_axis]
+
+        # Normalise axes to handle negative axis number conventions
+        ndim = len(labels.shape)
+        collapse_axis = [mu.normalize_axis_index(axis, ndim) for axis in collapse_axis]
+        uncollapsed_axes = [
+            i for i, _ in enumerate(labels.shape) if i not in collapse_axis
+        ]
+        if not len(uncollapsed_axes):
+            raise ValueError("Cannot collapse all axes of labels")
+        collapsed_shape = tuple(
+            [s for i, s in enumerate(labels.shape) if i not in collapse_axis]
+        )
+        broadcast_flag = any([collapsed_shape != field.shape for field in fields])
+        if broadcast_flag:
+            raise ValueError("Broadcasting not supported with collapse_axis")
+
+    else:
+        broadcast_flag = any([labels.shape != field.shape for field in fields])
+        if broadcast_flag:
+            # Broadcast input labels and fields to ensure they work according to numpy broadcasting rules
+            broadcast_fields = np.broadcast_arrays(labels, *fields)
+            labels = broadcast_fields[0]
+            fields = broadcast_fields[1:]
+
+    # mask must contain positive values to calculate statistics
+    if np.any(labels > 0):
+        if index is None:
+            index = features[id_column].to_numpy().astype(int)
+        else:
+            # get the statistics only for specified feature objects
+            if np.max(index) > np.max(labels):
+                raise ValueError("Index contains values that are not in labels!")
+
+        # Find which labels exist in features for output:
+        index_in_features = np.isin(index, features[id_column])
+
+        # set negative markers to 0 as they are unsegmented
+        bins = np.cumsum(np.bincount(np.maximum(labels.ravel(), 0)))
+        argsorted = np.argsort(labels.ravel())
+
+        # Create lambdas to get (ravelled) label locations using argsorted and bins
+        if collapse_axis is None:
+            label_locs = lambda i: argsorted[bins[i - 1] : bins[i]]
+        else:
+            # Collapse ravelled locations to the remaining axes
+            label_locs = lambda i: np.unique(
+                np.ravel_multi_index(
+                    np.array(
+                        np.unravel_index(argsorted[bins[i - 1] : bins[i]], labels.shape)
+                    )[uncollapsed_axes],
+                    collapsed_shape,
+                )
+            )
+
+        # apply each function given per statistic parameter to the labelled regions, sorted in ascending order
+        for stats_name in statistic.keys():
+            # if the function is given as a tuple, take the input parameters provided
+            if type(statistic[stats_name]) is tuple:
+                # ensure that keyword arguments are provided as a dictionary
+                if not type(statistic[stats_name][1]) is dict:
+                    raise TypeError(
+                        "Tuple must contain dictionary with keyword arguments for function."
+                    )
+
+                func = partial(statistic[stats_name][0], **statistic[stats_name][1])
+            else:
+                func = statistic[stats_name]
+
+            # default needs to be a sequence when the function output is array-like;
+            # probe the function with random data to find the output shape
+            output = func(*([np.random.rand(10)] * len(fields)))
+            if hasattr(output, "__len__"):
+                default = np.full(output.shape, default)
+
+            stats = np.array(
+                [
+                    (
+                        func(*(field.ravel()[label_locs(i)] for field in fields))
+                        if i < bins.size and bins[i] > bins[i - 1]
+                        else default
+                    )
+                    for i in index
+                ]
+            )
+
+            # add the results of the computed statistics to the feature dataframe, with the column name given per statistic;
+            # initiate a new column in the feature dataframe if it does not already exist
+            if stats_name not in features.columns:
+                if default is not None and not hasattr(default, "__len__"):
+                    # If the result is a scalar value we can create an empty column with the correct dtype
+                    features[stats_name] = np.full(
+                        [len(features)], default, type(default)
+                    )
+                else:
+                    features[stats_name] = np.full([len(features)], None, object)
+
+            for idx, label in enumerate(index):
+                if index_in_features[idx]:
+                    # test if the value is a scalar
+                    if not hasattr(stats[idx], "__len__"):
+                        # if so, we can just assign the value to the new column and row of the respective feature
+                        features.loc[features[id_column] == label, stats_name] = stats[idx]
+                    # if the stats output is array-like it has to be added in a different way
+                    else:
+                        df = pd.DataFrame({stats_name: [stats[idx]]})
+                        # get the row index rather than the pd.DataFrame index value since we need to use .iloc indexing
+                        row_idx = np.where(features[id_column] == label)[0]
+                        features.iloc[
+                            row_idx,
+                            features.columns.get_loc(stats_name),
+                        ] = df.apply(lambda r: tuple(r), axis=1)
+
+    return features
+
+
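As an illustration of the API above, here is a minimal sketch of calling get_statistics directly on toy data; the mask, field and dataframe below are invented for this example and are not part of the package:

    import numpy as np
    import pandas as pd
    from tobac.utils.bulk_statistics import get_statistics

    # hypothetical 2D label mask containing two features (labels 1 and 2)
    labels = np.array([[0, 1, 1], [0, 1, 0], [2, 2, 0]])
    # toy data field with the same shape as the mask
    field = np.arange(9.0).reshape(3, 3)
    # minimal feature dataframe; only the id column ("feature") is needed here
    features = pd.DataFrame({"feature": [1, 2]})

    # compute the mean and maximum of the field over each labelled region;
    # results are written to new "mean" and "max" columns of the dataframe
    features = get_statistics(
        features, labels, field, statistic={"mean": np.mean, "max": np.max}
    )

Because the fields are ravelled before the functions are applied, any reduction that accepts a 1D array (np.mean, np.max, np.count_nonzero, a partial with keyword arguments, etc.) should work here.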
+@decorators.iris_to_xarray()
+def get_statistics_from_mask(
+    features: pd.DataFrame,
+    segmentation_mask: xr.DataArray,
+    *fields: tuple[xr.DataArray],
+    statistic: dict[str, Union[Callable, tuple[Callable, dict]]] = {"Mean": np.mean},
+    index: Union[None, list[int]] = None,
+    default: Union[None, float] = None,
+    id_column: str = "feature",
+    collapse_dim: Union[None, str, list[str]] = None,
+    time_var_name: str = "time",
+    time_padding: Optional[timedelta] = None,
+) -> pd.DataFrame:
+    """Derives bulk statistics for each object in the segmentation mask, and
+    returns a features dataframe with these properties for each feature.
+
+    Parameters
+    ----------
+    features: pd.DataFrame
+        Dataframe with segmented features (output from feature detection or
+        segmentation). Timesteps need not exactly match those in the
+        segmentation mask, but all labels in the mask must be present in the
+        feature dataframe.
+
+    segmentation_mask : xr.DataArray
+        Segmentation mask output
+
+    *fields : tuple[xr.DataArray]
+        Field(s) with input data. If a field does not have a time dimension it
+        is considered time invariant, and the entire field is passed for each
+        time step in segmentation_mask. If the shape does not match that of
+        labels, numpy-style broadcasting will be applied.
+
+    statistic: dict[str, Callable], optional (default: {'Mean': np.mean})
+        Dictionary with the function(s) to apply over each region as values
+        and the names of the respective statistics as keys. The default
+        calculates the mean value of the field over each feature.
+
+    index: None | list[int], optional (default: None)
+        List of indices of regions in labels to apply the functions to. If
+        None, defaults to all integers between 1 and the maximum value in
+        labels.
+
+    default: None | float, optional (default: None)
+        Default value to return for a region that has no values.
+
+    id_column: str, optional (default: "feature")
+        Name of the column in the feature dataframe containing IDs that match
+        the labels in the mask. The default is the column "feature".
+
+    collapse_dim: None | str | list[str], optional (default: None)
+        Dimension names of labels to collapse, allowing, e.g., calculation of
+        statistics on 2D fields for the footprint of 3D objects.
+
+    time_var_name : str, optional (default: "time")
+        The name of the time dimension in the input fields and the time column
+        in features, by default "time"
+
+    time_padding: timedelta, optional (default: None)
+        If set, allows statistics to be associated with a feature whose input
+        timestep is within time_padding of the mask time. This is extremely
+        useful when rounding differences arise from converting between micro-
+        and nanosecond precision, as is common when using pandas dataframes.
+
+    Returns
+    -------
+    features: pd.DataFrame
+        Updated feature dataframe with the bulk statistics for each feature
+        saved in new columns.
+    """
+    # warning when feature labels are not unique in the dataframe
+    if not features[id_column].is_unique:
+        logging.warning(
+            "Feature labels are not unique which may cause unexpected results for the computation of bulk statistics."
+        )
+        # extra warning when feature labels are not unique within a timestep
+        uniques = features.groupby("time")[id_column].value_counts().values
+        if uniques[uniques > 1].size > 0:
+            logging.warning(
+                "Note that non-unique feature labels also occur within the same timestep. This likely causes unexpected results for the computation of bulk statistics."
+            )
+
+    if collapse_dim is not None:
+        if isinstance(collapse_dim, str):
+            collapse_dim = [collapse_dim]
+        non_time_dims = [dim for dim in segmentation_mask.dims if dim != "time"]
+        collapse_axis = [
+            i for i, dim in enumerate(non_time_dims) if dim in collapse_dim
+        ]
+        if len(collapse_dim) != len(collapse_axis):
+            raise ValueError(
+                "One or more of collapse_dim not found in dimensions of segmentation_mask"
+            )
+    else:
+        collapse_axis = None
+
+    # check if any of the feature dataframe input values match with segmentation mask IDs
+    if not np.any(np.isin(features[id_column], np.unique(segmentation_mask))):
+        raise ValueError(
+            "The labels of the segmentation mask and the feature dataframe do not seem to match. Please make sure you provide the correct input feature dataframe to calculate the bulk statistics."
+        )
+
+    # get bulk statistics for each timestep
+    step_statistics = []
+
+    for _, tt, segmentation_mask_t, features_t in field_and_features_over_time(
+        segmentation_mask,
+        features,
+        time_var_name=time_var_name,
+        time_padding=time_padding,
+    ):
+        # select the specific timestep of each field, or the whole field if it is time invariant
+        fields_t = (
+            (
+                field.sel(
+                    {time_var_name: tt}, method="nearest", tolerance=time_padding
+                ).values
+                if time_var_name in field.coords
+                else field.values
+            )
+            for field in fields
+        )
+
+        # make sure that the labels in the segmentation mask exist in the feature dataframe;
+        # continue the loop because not all timesteps might have matching IDs
+        if not np.any(np.isin(features_t[id_column], np.unique(segmentation_mask_t))):
+            warnings.warn("Not all timesteps have matching features", UserWarning)
+            step_statistics.append(features_t.copy())
+            continue
+        else:
+            step_statistics.append(
+                get_statistics(
+                    features_t.copy(),
+                    segmentation_mask_t.values.astype(np.int64),
+                    *fields_t,
+                    statistic=statistic,
+                    default=default,
+                    index=index,
+                    id_column=id_column,
+                    collapse_axis=collapse_axis,
+                )
+            )
+
+    features = pd.concat(step_statistics)
+
+    return features
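A hedged usage sketch for get_statistics_from_mask, assuming a single-timestep mask and field; all of the data here is invented for illustration, and the exact per-timestep matching is handled internally by field_and_features_over_time:

    import numpy as np
    import pandas as pd
    import xarray as xr
    from datetime import timedelta
    from tobac.utils.bulk_statistics import get_statistics_from_mask

    # invented single-timestep segmentation mask and matching data field
    times = pd.to_datetime(["2000-01-01 00:00"])
    mask = xr.DataArray(
        np.array([[[0, 1], [2, 2]]], dtype=np.int64),
        dims=("time", "y", "x"),
        coords={"time": times},
    )
    field = xr.DataArray(
        np.array([[[1.0, 2.0], [3.0, 4.0]]]),
        dims=("time", "y", "x"),
        coords={"time": times},
    )
    features = pd.DataFrame({"feature": [1, 2], "time": [times[0], times[0]]})

    # mean and cell count per feature; time_padding tolerates small
    # (e.g. sub-second) mismatches between feature and mask timestamps
    features = get_statistics_from_mask(
        features,
        mask,
        field,
        statistic={"mean": np.mean, "ncells": np.count_nonzero},
        time_padding=timedelta(seconds=1),
    )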
@@ -0,0 +1,184 @@
+"""Functions for converting between and working with different datetime formats"""
+
+from typing import Union
+import datetime
+import numpy as np
+import pandas as pd
+import xarray as xr
+import cftime
+
+
+def to_cftime(
+    dates: Union[str, datetime.datetime, np.datetime64, pd.Timestamp, cftime.datetime],
+    calendar: str,
+    align_on: str = "date",
+) -> cftime.datetime:
+    """Converts a provided datetime-like object to a cftime datetime with the
+    given calendar
+
+    Parameters
+    ----------
+    dates : Union[str, datetime.datetime, np.datetime64, pd.Timestamp, cftime.datetime]
+        A datetime-like object or array of datetime-like objects to be converted
+    calendar : str
+        The requested cftime calendar
+    align_on : str, optional
+        The 'align_on' parameter required for 360-day, 365-day and 366-day
+        cftime dates, by default "date"
+
+    Returns
+    -------
+    cftime.datetime
+        A cftime object or array of cftime objects in the requested calendar
+    """
+    dates_arr = np.atleast_1d(dates)
+    if isinstance(dates_arr[0], cftime.datetime):
+        cftime_dates = (
+            xr.DataArray(dates_arr, {"time": dates_arr})
+            .convert_calendar(calendar, use_cftime=True, align_on=align_on)
+            .time.values
+        )
+    else:
+        cftime_dates = (
+            xr.DataArray(dates_arr, {"time": pd.to_datetime(dates_arr)})
+            .convert_calendar(calendar, use_cftime=True, align_on=align_on)
+            .time.values
+        )
+    # return a scalar if a scalar (or a single string) was passed in
+    if (not hasattr(dates, "__iter__") or isinstance(dates, str)) and len(cftime_dates):
+        return cftime_dates[0]
+    return cftime_dates
+
+
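A brief usage sketch; the input values are arbitrary examples:

    import numpy as np
    from tobac.utils.datetime import to_cftime

    # scalar input returns a scalar cftime object in the requested calendar
    to_cftime(np.datetime64("2000-01-01 12:00"), "360_day")

    # iterable input returns an array of cftime objects
    to_cftime(["2000-01-01", "2000-01-02"], "noleap")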
+def to_timestamp(
+    dates: Union[str, datetime.datetime, np.datetime64, pd.Timestamp, cftime.datetime],
+) -> pd.Timestamp:
+    """Converts a provided datetime-like object to a pandas timestamp
+
+    Parameters
+    ----------
+    dates : Union[str, datetime.datetime, np.datetime64, pd.Timestamp, cftime.datetime]
+        A datetime-like object or array of datetime-like objects to be converted
+
+    Returns
+    -------
+    pd.Timestamp
+        A pandas timestamp or array of pandas timestamps
+    """
+    squeeze_output = False
+    if not hasattr(dates, "__iter__") or isinstance(dates, str):
+        dates = np.atleast_1d(dates)
+        squeeze_output = True
+
+    if isinstance(next(iter(dates)), cftime.datetime):
+        pd_dates = xr.CFTimeIndex(dates).to_datetimeindex()
+    else:
+        pd_dates = pd.to_datetime(dates)
+
+    if squeeze_output:
+        return next(iter(pd_dates))
+    return pd_dates
+
+
+def to_datetime(
+    dates: Union[str, datetime.datetime, np.datetime64, pd.Timestamp, cftime.datetime],
+) -> datetime.datetime:
+    """Converts a provided datetime-like object to python datetime objects
+
+    Parameters
+    ----------
+    dates : Union[str, datetime.datetime, np.datetime64, pd.Timestamp, cftime.datetime]
+        A datetime-like object or array of datetime-like objects to be converted
+
+    Returns
+    -------
+    datetime.datetime
+        A python datetime or array of python datetimes
+    """
+    return to_timestamp(dates).to_pydatetime()
+
+
+def to_datetime64(
+    dates: Union[str, datetime.datetime, np.datetime64, pd.Timestamp, cftime.datetime],
+) -> np.datetime64:
+    """Converts a provided datetime-like object to numpy datetime64 objects
+
+    Parameters
+    ----------
+    dates : Union[str, datetime.datetime, np.datetime64, pd.Timestamp, cftime.datetime]
+        A datetime-like object or array of datetime-like objects to be converted
+
+    Returns
+    -------
+    np.datetime64
+        A numpy datetime64 or array of numpy datetime64s
+    """
+    return to_timestamp(dates).to_numpy()
+
+
+def to_datestr(
+    dates: Union[str, datetime.datetime, np.datetime64, pd.Timestamp, cftime.datetime],
+) -> str:
+    """Converts a provided datetime-like object to ISO format date strings
+
+    Parameters
+    ----------
+    dates : Union[str, datetime.datetime, np.datetime64, pd.Timestamp, cftime.datetime]
+        A datetime-like object or array of datetime-like objects to be converted
+
+    Returns
+    -------
+    str
+        A string or array of strings in ISO date format
+    """
+    dates = to_datetime64(dates)
+    if hasattr(dates, "__iter__"):
+        return dates.astype(str)
+    return str(dates)
+
+
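The three wrappers above all route through to_timestamp, so they share the same scalar/array behaviour; a quick sketch with arbitrary inputs:

    import numpy as np
    from tobac.utils.datetime import to_timestamp, to_datetime64, to_datestr

    to_timestamp("2000-01-01")                  # -> pd.Timestamp
    to_datetime64("2000-01-01 06:00")           # -> np.datetime64
    to_datestr(np.datetime64("2000-01-01"))     # -> ISO date string
    to_datestr(["2000-01-01", "2000-01-02"])    # -> array of ISO date strings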
+def match_datetime_format(
+    dates: Union[str, datetime.datetime, np.datetime64, pd.Timestamp, cftime.datetime],
+    target: Union[str, datetime.datetime, np.datetime64, pd.Timestamp, cftime.datetime],
+) -> Union[str, datetime.datetime, np.datetime64, pd.Timestamp, cftime.datetime]:
+    """Converts the provided datetime-like objects to the same datetime format
+    as the provided target
+
+    Parameters
+    ----------
+    dates : Union[str, datetime.datetime, np.datetime64, pd.Timestamp, cftime.datetime]
+        A datetime-like object or array of datetime-like objects to be converted
+    target : Union[str, datetime.datetime, np.datetime64, pd.Timestamp, cftime.datetime]
+        A datetime-like object or array of datetime-like objects which the dates
+        input will be converted to match
+
+    Returns
+    -------
+    Union[str, datetime.datetime, np.datetime64, pd.Timestamp, cftime.datetime]
+        The datetime-like values of the dates parameter, converted to a format
+        matching that of the target input
+
+    Raises
+    ------
+    ValueError
+        If the target parameter provided is not a datetime-like object or array
+        of datetime-like objects
+    """
+    if isinstance(target, str):
+        return to_datestr(dates)
+    if isinstance(target, xr.DataArray):
+        target = target.values
+    if isinstance(target, pd.Series):
+        target = target.to_numpy()
+    if hasattr(target, "__iter__"):
+        target = target[0]
+    if isinstance(target, str):
+        return to_datestr(dates)
+    if isinstance(target, cftime.datetime):
+        return to_cftime(dates, target.calendar)
+    if isinstance(target, pd.Timestamp):
+        return to_timestamp(dates)
+    if isinstance(target, np.datetime64):
+        return to_datetime64(dates)
+    if isinstance(target, datetime.datetime):
+        return to_datetime(dates)
+    raise ValueError("Target is not a valid datetime format")
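A short sketch of the dispatch behaviour; the inputs are arbitrary:

    import numpy as np
    import pandas as pd
    from tobac.utils.datetime import match_datetime_format

    # the target's type sets the output format: a datetime64 target
    # means the string input comes back as np.datetime64
    match_datetime_format("2000-01-01", np.datetime64("1970-01-01"))

    # for an iterable target (here a pd.Series), the first element
    # determines the output format
    match_datetime_format(
        ["2000-01-01", "2000-01-02"], pd.Series(pd.to_datetime(["1970-01-01"]))
    )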