ctdproc 0.1.3__tar.gz → 0.2.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. ctdproc-0.2.0/PKG-INFO +76 -0
  2. {ctdproc-0.1.3 → ctdproc-0.2.0}/README.rst +16 -4
  3. ctdproc-0.2.0/pyproject.toml +74 -0
  4. ctdproc-0.2.0/src/ctdproc/__init__.py +6 -0
  5. {ctdproc-0.1.3 → ctdproc-0.2.0/src}/ctdproc/calcs.py +79 -21
  6. {ctdproc-0.1.3 → ctdproc-0.2.0/src}/ctdproc/helpers.py +69 -36
  7. ctdproc-0.2.0/src/ctdproc/io.py +976 -0
  8. {ctdproc-0.1.3 → ctdproc-0.2.0/src}/ctdproc/proc.py +266 -249
  9. ctdproc-0.1.3/AUTHORS.rst +0 -25
  10. ctdproc-0.1.3/CONTRIBUTING.rst +0 -128
  11. ctdproc-0.1.3/HISTORY.rst +0 -18
  12. ctdproc-0.1.3/MANIFEST.in +0 -13
  13. ctdproc-0.1.3/PKG-INFO +0 -77
  14. ctdproc-0.1.3/ctdproc/__init__.py +0 -8
  15. ctdproc-0.1.3/ctdproc/io.py +0 -521
  16. ctdproc-0.1.3/ctdproc.egg-info/PKG-INFO +0 -77
  17. ctdproc-0.1.3/ctdproc.egg-info/SOURCES.txt +0 -37
  18. ctdproc-0.1.3/ctdproc.egg-info/dependency_links.txt +0 -1
  19. ctdproc-0.1.3/ctdproc.egg-info/not-zip-safe +0 -1
  20. ctdproc-0.1.3/ctdproc.egg-info/requires.txt +0 -15
  21. ctdproc-0.1.3/ctdproc.egg-info/top_level.txt +0 -1
  22. ctdproc-0.1.3/docs/Makefile +0 -20
  23. ctdproc-0.1.3/docs/_build/html/_static/file.png +0 -0
  24. ctdproc-0.1.3/docs/_build/html/_static/minus.png +0 -0
  25. ctdproc-0.1.3/docs/_build/html/_static/plus.png +0 -0
  26. ctdproc-0.1.3/docs/authors.rst +0 -1
  27. ctdproc-0.1.3/docs/conf.py +0 -167
  28. ctdproc-0.1.3/docs/contributing.rst +0 -1
  29. ctdproc-0.1.3/docs/history.rst +0 -1
  30. ctdproc-0.1.3/docs/index.rst +0 -19
  31. ctdproc-0.1.3/docs/installation.rst +0 -52
  32. ctdproc-0.1.3/docs/make.bat +0 -36
  33. ctdproc-0.1.3/docs/notes.rst +0 -29
  34. ctdproc-0.1.3/docs/readme.rst +0 -1
  35. ctdproc-0.1.3/docs/source/ctdproc.rst +0 -54
  36. ctdproc-0.1.3/docs/source/ctdproc.tests.rst +0 -38
  37. ctdproc-0.1.3/docs/source/modules.rst +0 -7
  38. ctdproc-0.1.3/docs/usage.rst +0 -57
  39. ctdproc-0.1.3/notebooks/ctdproc_example.ipynb +0 -1369
  40. ctdproc-0.1.3/setup.cfg +0 -38
  41. ctdproc-0.1.3/setup.py +0 -68
  42. {ctdproc-0.1.3 → ctdproc-0.2.0}/LICENSE +0 -0
ctdproc-0.2.0/PKG-INFO ADDED
@@ -0,0 +1,76 @@
+ Metadata-Version: 2.4
+ Name: ctdproc
+ Version: 0.2.0
+ Summary: CTD data processing
+ Author: Gunnar Voet
+ Author-email: Gunnar Voet <gvoet@ucsd.edu>
+ License-Expression: MIT
+ License-File: LICENSE
+ Classifier: Development Status :: 4 - Beta
+ Classifier: Intended Audience :: Science/Research
+ Classifier: Topic :: Scientific/Engineering :: Physics
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Natural Language :: English
+ Classifier: Programming Language :: Python :: 3
+ Requires-Dist: gsw>=3.6.20
+ Requires-Dist: matplotlib>=3.9.4
+ Requires-Dist: munch>=4.0.0
+ Requires-Dist: netcdf4>=1.7.2
+ Requires-Dist: numpy>=2.0.2
+ Requires-Dist: pandas>=2.3.3
+ Requires-Dist: scipy>=1.13.1
+ Requires-Dist: xarray>=2024.7.0
+ Requires-Dist: xmltodict>=1.0.2
+ Requires-Python: >=3.9
+ Project-URL: Bug Tracker, https://github.com/gunnarvoet/ctdproc/issues
+ Project-URL: Changelog, https://github.com/gunnarvoet/ctdproc/blob/main/HISTORY.rst
+ Project-URL: Documentation, https://ctdproc.readthedocs.io
+ Project-URL: Homepage, https://github.com/gunnarvoet/ctdproc
+ Description-Content-Type: text/x-rst
+
+ =======
+ ctdproc
+ =======
+
+
+ .. image:: https://img.shields.io/pypi/v/ctdproc.svg
+     :target: https://pypi.python.org/pypi/ctdproc
+
+ .. image:: https://readthedocs.org/projects/ctdproc/badge/?version=latest
+     :target: https://ctdproc.readthedocs.io/en/latest/?badge=latest
+     :alt: Documentation Status
+
+ .. image:: https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/v2.json
+     :target: https://docs.astral.sh/ruff/
+     :alt: Ruff
+
+
+
+ CTD data processing in python.
+
+ * Free software: MIT license
+ * Documentation: https://ctdproc.readthedocs.io.
+
+
+ Features
+ --------
+
+ * Convert CTD data collected with Seabird 9/11 systems in hex-format to human-readable formats and physical units.
+
+ * Process CTD time series data into depth-binned profiles.
+
+
+ Additional Information
+ ----------------------
+ Information on the Seabird hex data format saved by SBE 11 Deck Units can be found in the `SBE manual <./misc/manual-11pV2_018.pdf>`_ on p. 65ff.
+
+
+ Credits
+ -------
+
+ This package borrows heavily from a toolbox written in MATLAB® with contributions from Jennifer MacKinnon, Shaun Johnston, Daniel Rudnick, Robert Todd and others.
+
+
+ Docs
+ ----
+ `uv run make docs` will generate the docs.
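
The Features list above describes a two-stage pipeline: ctdproc.io converts the Seabird hex stream to physical units, and ctdproc.proc turns the resulting time series into depth-binned profiles. As a rough, self-contained illustration of what depth binning means (a generic numpy sketch with synthetic data, not ctdproc's actual implementation):

    import numpy as np

    # Synthetic 24 Hz downcast: depth and temperature sampled in time.
    depth = np.linspace(0.0, 500.0, 24 * 600)
    temp = 20.0 * np.exp(-depth / 200.0)

    edges = np.arange(0.0, 501.0, 1.0)  # 1 m bin edges
    ibin = np.clip(np.digitize(depth, edges) - 1, 0, edges.size - 2)
    counts = np.bincount(ibin, minlength=edges.size - 1)
    sums = np.bincount(ibin, weights=temp, minlength=edges.size - 1)
    with np.errstate(invalid="ignore"):
        temp_binned = sums / counts  # NaN where a depth bin is empty
    bin_centers = 0.5 * (edges[:-1] + edges[1:])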
{ctdproc-0.1.3 → ctdproc-0.2.0}/README.rst RENAMED
@@ -6,13 +6,14 @@ ctdproc
  .. image:: https://img.shields.io/pypi/v/ctdproc.svg
      :target: https://pypi.python.org/pypi/ctdproc

- .. image:: https://img.shields.io/travis/gunnarvoet/ctdproc.svg
-     :target: https://travis-ci.com/gunnarvoet/ctdproc
-
  .. image:: https://readthedocs.org/projects/ctdproc/badge/?version=latest
      :target: https://ctdproc.readthedocs.io/en/latest/?badge=latest
      :alt: Documentation Status

+ .. image:: https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/v2.json
+     :target: https://docs.astral.sh/ruff/
+     :alt: Ruff
+


  CTD data processing in python.
@@ -28,7 +29,18 @@ Features

  * Process CTD time series data into depth-binned profiles.

+
+ Additional Information
+ ----------------------
+ Information on the Seabird hex data format saved by SBE 11 Deck Units can be found in the `SBE manual <./misc/manual-11pV2_018.pdf>`_ on p. 65ff.
+
+
  Credits
  -------

- This package borrows heavily from a toolbox written in MATLAB® with contributions from Jennifer MacKinnon, Shaun Johnston, Daniel Rudnick, Robert Todd and others.
+ This package borrows heavily from a toolbox written in MATLAB® with contributions from Jennifer MacKinnon, Shaun Johnston, Daniel Rudnick, Robert Todd and others.
+
+
+ Docs
+ ----
+ `uv run make docs` will generate the docs.
ctdproc-0.2.0/pyproject.toml ADDED
@@ -0,0 +1,74 @@
+ [project]
+ name = "ctdproc"
+ version = "0.2.0"
+ description = "CTD data processing"
+ readme = "README.rst"
+ authors = [ { name = "Gunnar Voet", email = "gvoet@ucsd.edu" }, ]
+ license = "MIT"
+ license-files = ["LICENSE", ]
+ requires-python = ">=3.9"
+ dependencies = [
+     "gsw>=3.6.20",
+     "matplotlib>=3.9.4",
+     "munch>=4.0.0",
+     "netcdf4>=1.7.2",
+     "numpy>=2.0.2",
+     "pandas>=2.3.3",
+     "scipy>=1.13.1",
+     "xarray>=2024.7.0",
+     "xmltodict>=1.0.2",
+ ]
+ classifiers=[
+     "Development Status :: 4 - Beta",
+     "Intended Audience :: Science/Research",
+     "Topic :: Scientific/Engineering :: Physics",
+     "License :: OSI Approved :: MIT License",
+     "Natural Language :: English",
+     "Programming Language :: Python :: 3",
+ ]
+
+ [project.urls]
+ "Homepage" = "https://github.com/gunnarvoet/ctdproc"
+ "Documentation" = "https://ctdproc.readthedocs.io"
+ "Changelog" = "https://github.com/gunnarvoet/ctdproc/blob/main/HISTORY.rst"
+ "Bug Tracker" = "https://github.com/gunnarvoet/ctdproc/issues"
+
+ [build-system]
+ requires = ["uv_build>=0.8.22,<0.10.0"]
+ build-backend = "uv_build"
+
+ [dependency-groups]
+ dev = [
+     "pytest>=8.4.2",
+     "ruff>=0.14.2",
+     "sphinx>=7.4.7",
+     "sphinx-issues>=5.0.1",
+     "sphinx-rtd-theme>=3.0.2",
+ ]
+
+ [tool.pytest.ini_options]
+ minversion = "6.0"
+ addopts = [
+     "--strict-markers", # Warn about undeclared markers
+     "--import-mode=importlib", # Recommended for modern projects
+ ]
+ testpaths = [
+     "tests", # Directory where your tests are located
+ ]
+ norecursedirs = [
+     ".git",
+     ".venv",
+     "dist",
+     "build",
+     "__pycache__"
+ ]
+ xfail_strict = true
+
+ [tool.ruff]
+ exclude = [".venv"]
+
+ [[tool.uv.index]]
+ name = "testpypi"
+ url = "https://test.pypi.org/simple/"
+ publish-url = "https://test.pypi.org/legacy/"
+ explicit = true
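
With the uv_build backend and the named testpypi index declared above, a plausible workflow (assuming a recent uv) is `uv sync` to create the environment with the dev group, `uv run pytest` to run the suite under the declared `[tool.pytest.ini_options]` settings, and `uv build` followed by `uv publish --index testpypi` to exercise the publish-url against TestPyPI before a real release.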
ctdproc-0.2.0/src/ctdproc/__init__.py ADDED
@@ -0,0 +1,6 @@
+ import importlib.metadata
+ from . import calcs, helpers, io, proc
+
+ __all__ = ["io", "proc", "calcs", "helpers"]
+ # version is defined in pyproject.toml
+ __version__ = importlib.metadata.version("ctdproc")
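
Since `__version__` is now resolved from installed package metadata rather than hard-coded, a quick runtime check:

    import ctdproc

    # Resolved via importlib.metadata at import time;
    # prints "0.2.0" for this release.
    print(ctdproc.__version__)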
{ctdproc-0.1.3 → ctdproc-0.2.0/src}/ctdproc/calcs.py RENAMED
@@ -1,9 +1,9 @@
  #!/usr/bin/env python
  # coding: utf-8
- import numpy as np
- import xarray as xr
  import gsw
+ import numpy as np
  from scipy import signal
+
  from . import helpers


@@ -38,7 +38,7 @@ def swcalcs(data):
      """
      # Most derived variables are now calculated in proc.ctd_cleanup2()
      # Not sure why they are calculated two times in the Matlab package.
-
+
      # data = calc_sal(data)
      # data = calc_temp(data)
      # data = calc_sigma(data)
@@ -55,12 +55,44 @@ def calc_sal(data):
      SA2, SP2 = calc_allsal(data.c2, data.t2, data.p, data.lon, data.lat)

      # Absolute salinity
-     data["SA1"] = (["time"], SA1, {"long_name": "absolute salinity", "units": "g/kg"})
-     data["SA2"] = (["time"], SA2, {"long_name": "absolute salinity", "units": "g/kg"})
+     data["SA1"] = (
+         ("time",),
+         SA1.data,
+         {
+             "long_name": "absolute salinity",
+             "units": "g kg-1",
+             "standard_name": "sea_water_absolute_salinity",
+         },
+     )
+     data["SA2"] = (
+         ("time",),
+         SA2.data,
+         {
+             "long_name": "absolute salinity",
+             "units": "g kg-1",
+             "standard_name": "sea_water_absolute_salinity",
+         },
+     )

      # Practical salinity
-     data["s1"] = (["time"], SP1, {"long_name": "practical salinity", "units": ""})
-     data["s2"] = (["time"], SP2, {"long_name": "practical salinity", "units": ""})
+     data["s1"] = (
+         ("time",),
+         SP1.data,
+         {
+             "long_name": "practical salinity",
+             "units": "",
+             "standard_name": "sea_water_practical_salinity",
+         },
+     )
+     data["s2"] = (
+         ("time",),
+         SP2.data,
+         {
+             "long_name": "practical salinity",
+             "units": "",
+             "standard_name": "sea_water_practical_salinity",
+         },
+     )

      return data

@@ -69,19 +101,32 @@ def calc_temp(data):
      # Conservative temperature
      for si in ["1", "2"]:
          data["CT{:s}".format(si)] = (
-             ["time"],
-             gsw.CT_from_t(data["s{:s}".format(si)], data["t{:s}".format(si)], data.p),
-             {"long_name": "conservative temperature", "units": "°C"},
+             ("time",),
+             gsw.CT_from_t(
+                 data["s{:s}".format(si)], data["t{:s}".format(si)], data.p
+             ).data,
+             {
+                 "long_name": "conservative temperature",
+                 "units": "degree_C",
+                 "standard_name": "sea_water_conservative_temperature",
+             },
          )

      # Potential temperature
      for si in ["1", "2"]:
          data["th{:s}".format(si)] = (
-             ["time"],
+             ("time",),
              gsw.pt_from_t(
-                 data["SA{:s}".format(si)], data["t{:s}".format(si)], p=data.p, p_ref=0
-             ),
-             {"long_name": "potential temperature", "units": "°C"},
+                 data["SA{:s}".format(si)],
+                 data["t{:s}".format(si)],
+                 p=data.p,
+                 p_ref=0,
+             ).data,
+             {
+                 "long_name": "potential temperature",
+                 "units": "degree_C",
+                 "standard_name": "sea_water_potential_temperature",
+             },
          )

      return data
@@ -91,9 +136,17 @@ def calc_sigma(data):
      # Potential density anomaly
      for si in ["1", "2"]:
          data["sg{:s}".format(si)] = (
-             ["time"],
-             gsw.sigma0(data["SA{:s}".format(si)], data["CT{:s}".format(si)],),
-             {"long_name": "potential density anomaly", "units": "kg/m$^3$"},
+             ("time",),
+             gsw.sigma0(
+                 data["SA{:s}".format(si)],
+                 data["CT{:s}".format(si)],
+             ).data,
+             {
+                 "long_name": "potential density anomaly",
+                 "units": "kg m-3",
+                 "standard_name": "sea_water_sigma_theta",
+                 "reference_pressure": "0 dbar",
+             },
          )
      return data

@@ -101,9 +154,14 @@
  def calc_depth(data):
      # Depth
      data.coords["depth"] = (
-         ["time"],
-         -1 * gsw.z_from_p(data.p, data.lat),
-         {"long_name": "depth", "units": "m"},
+         ("time",),
+         -1 * gsw.z_from_p(data.p, data.lat).data,
+         {
+             "long_name": "depth",
+             "units": "m",
+             "standard_name": "depth",
+             "positive": "down",
+         },
      )
      return data

@@ -148,7 +206,7 @@ def wsink(p, Ts, Fs):
      Computes the sinking (or rising) velocity from the pressure signal p
      by first differencing. The pressure signal is smoothed with a low-pass
      filter for differentiation. If the input signal is shorter than the
-     smoothing time scale, w is taken as the slope of the linear regression of p.
+     smoothing time scale, w is taken as the slope of the linear regression of p.

      Adapted from wsink.m - Fabian Wolk, Rockland Oceanographic Services Inc.

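
The calcs.py hunks above strip the gsw results to plain arrays via `.data` and attach CF-style `units`/`standard_name` attributes; the TEOS-10 calls themselves are unchanged. A self-contained sketch of the same gsw calls, with made-up input values:

    import gsw
    import numpy as np

    SP = np.array([34.5, 34.7])    # practical salinity
    t = np.array([10.0, 2.0])      # in-situ temperature [degree_C]
    p = np.array([100.0, 1500.0])  # pressure [dbar]
    lon, lat = -150.0, 45.0

    SA = gsw.SA_from_SP(SP, p, lon, lat)  # absolute salinity [g kg-1]
    CT = gsw.CT_from_t(SA, t, p)          # conservative temperature
    sg0 = gsw.sigma0(SA, CT)              # potential density anomaly [kg m-3]
    depth = -gsw.z_from_p(p, lat)         # positive-down depth [m]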
{ctdproc-0.1.3 → ctdproc-0.2.0/src}/ctdproc/helpers.py RENAMED
@@ -1,8 +1,10 @@
- import numpy as np
  import datetime
- from scipy.interpolate import interp1d
  import warnings

+ import numpy as np
+ import pandas as pd
+ from scipy.interpolate import interp1d
+

  def unique_arrays(*arrays):
      """
@@ -10,21 +12,19 @@ def unique_arrays(*arrays):

      Parameters
      ----------
-     arrays : np.array or list
-         Numpy arrays either in a list or separated by commas.
+     arrays : A tuple of np.array.ndarray

      Returns
      -------
      unique : np.array
-         Numpy array with unique elements from the input arrays.
+         numpy array with unique elements from the input arrays
      """
-     h = np.hstack(np.squeeze(arrays))
-     return np.unique(h)
+     return np.unique(np.hstack(arrays))


  def findsegments(ibad):
      """
-     Find contiguous segments in an array of indices.
+     Find contiguous segments in an array of indices.

      Parameters
      ----------
@@ -42,7 +42,6 @@
      """
      dibad = np.diff(ibad)
      jj = np.argwhere(dibad > 1)
-     nseg = jj.size + 1

      istart = jj + 1
      istart = np.insert(istart, 0, 0)
@@ -86,10 +85,10 @@ def inearby(ibad, inearm, inearp, n):
          k = np.array([]).astype("int64")
      else:
          istart, istop, seglength = findsegments(ibad)
-         new_ind = []
+         new_ind = np.array([]).astype("int64")
          for ia, ib in zip(istart, istop):
-             new_ind.append(list(range(ia - inearm, ib + inearp + 1)))
-         k = unique_arrays(new_ind)
+             new_ind = np.append(new_ind, np.arange(ia - inearm, ib + inearp + 1))
+         k = np.unique(new_ind)
      k = k[((k >= 0) & (k < n))]
      return k

@@ -110,17 +109,25 @@ def interpbadsegments(x, ibad):
      y : np.array
          Interpolated array
      """
+
+     def start_end_warning(loc):
+         warnings.warn(
+             message=f"no interpolation at {loc}",
+             category=RuntimeWarning,
+         )
+
      istart, istop, seglen = findsegments(ibad)
-     nsegs = istart.size
      y = x.copy()
      for iia, iis, iilen in zip(istart, istop, seglen):
          i1 = iia - 1
          i2 = range(iia, iis + 1)
          i3 = iis + 1
          if i1 < 0:
-             print("interpbadsegments: bad at istart - no interpolation at start")
+             # start_end_warning("start")
+             pass
          elif i3 > x.size:
-             print("interpbadsegments: bad at istop - no interpolation at stop")
+             # start_end_warning("end")
+             pass
          else:
              y[i2] = interp1d(np.array([i1, i3]), x[[i1, i3]])(i2)
      return y
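
The interpbadsegments hunk above silences the start/stop notices (the warning helper is left commented out); the signature and interpolation behavior are unchanged. A minimal usage sketch, assuming the package is installed:

    import numpy as np
    from ctdproc import helpers

    x = np.array([0.0, 1.0, 9.0, 9.5, 4.0, 5.0])
    ibad = np.array([2, 3])  # indices flagged as bad

    # Each contiguous run of bad indices is replaced by linear
    # interpolation between its good neighbors.
    y = helpers.interpbadsegments(x, ibad)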
@@ -129,7 +136,7 @@ def interpbadsegments(x, ibad):
  def glitchcorrect(x, diffx, prodx, ibefore=0, iafter=0):
      """
      Remove glitches/spikes in array.
-
+
      Adapted from tms_tc_glitchcorrect.m

      Parameters
@@ -157,27 +164,35 @@ def glitchcorrect(x, diffx, prodx, ibefore=0, iafter=0):
      with warnings.catch_warnings():
          # Prevent warning due to nans present in nanmin being printed
          warnings.simplefilter("ignore")
-         dmin2 = np.nanmin(np.vstack([np.absolute(dx[0:-1]), np.absolute(dx[1:])]), axis=0)
-         dmin3 = np.nanmin(np.vstack([np.absolute(dx[0:-2]), np.absolute(dx[2:])]), axis=0)
+         dmin2 = np.nanmin(
+             np.vstack([np.absolute(dx[0:-1]), np.absolute(dx[1:])]), axis=0
+         )
+         dmin3 = np.nanmin(
+             np.vstack([np.absolute(dx[0:-2]), np.absolute(dx[2:])]), axis=0
+         )

      dmul2 = -dx[0:-1] * dx[1:]
      dmul3 = -dx[0:-2] * dx[2:]

-     ii2 = np.argwhere(
-         np.greater(dmul2, prodx, where=np.isfinite(dmul2)) &
-         np.greater(dmin2, diffx, where=np.isfinite(dmin2))
+     ii2 = np.squeeze(
+         np.argwhere(
+             np.greater(dmul2, prodx, where=np.isfinite(dmul2))
+             & np.greater(dmin2, diffx, where=np.isfinite(dmin2))
          )
-     ii3 = np.argwhere(
-         np.greater(dmul3, prodx, where=np.isfinite(dmul3)) &
-         np.greater(dmin3, diffx, where=np.isfinite(dmin3))
+     )
+     ii3 = np.squeeze(
+         np.argwhere(
+             np.greater(dmul3, prodx, where=np.isfinite(dmul3))
+             & np.greater(dmin3, diffx, where=np.isfinite(dmin3))
          )
+     )

      ii2 = unique_arrays(ii2, ii2 + 1)
      ii3 = unique_arrays(ii3, ii3 + 1, ii3 + 2)
-     ii = unique_arrays(ii2, ii3)

      jj2 = inearby(ii2, ibefore, iafter, nx)
      jj3 = inearby(ii3, ibefore, iafter, nx)
+
      jj = unique_arrays(jj2, jj3)

      if jj.size > 0:
@@ -205,29 +220,30 @@ def preen(x, xmin, xmax):
          Cleaned array
      """
      indexall = np.array(range(0, x.size))
-     ii = np.squeeze(np.where(((x<xmin) | (x>xmax) | (np.imag(x)!=0))))
+     ii = np.squeeze(np.where(((x < xmin) | (x > xmax) | (np.imag(x) != 0))))
      indexclean = np.delete(indexall, ii)
      x = np.delete(x, ii)
-     xp = interp1d(indexclean, x)(indexall)
+     fint = interp1d(indexclean, x, bounds_error=False, fill_value=np.nan)
+     xp = fint(indexall)
      return xp


  def atanfit(x, f, Phi, W):
-     f = np.arctan(2*np.pi*f*x[0]) + 2*np.pi*f*x[1] + Phi
-     f = np.matmul(np.matmul(f.transpose(), W**4), f)
+     f = np.arctan(2 * np.pi * f * x[0]) + 2 * np.pi * f * x[1] + Phi
+     f = np.matmul(np.matmul(f.transpose(), W ** 4), f)
      return f


  def pad_lr(p, nPad):
      """Pad array left and right"""
      p0 = p[0]
-     p=p-p0
-     p=p0+np.insert(p, 0, -p[nPad-1::-1])
-
+     p = p - p0
+     p = p0 + np.insert(p, 0, -p[nPad - 1 :: -1])
+
      p0 = p[-1]
-     p = p-p0
-     p = p0 + np.insert(p, -1, -p[:-nPad-1:-1])
-
+     p = p - p0
+     p = p0 + np.insert(p, -1, -p[: -nPad - 1 : -1])
+
      return p

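
Note the behavioral change in preen above: with bounds_error=False and fill_value=nan, out-of-range positions at the array ends now come back as NaN instead of raising. A usage sketch with invented thresholds:

    import numpy as np
    from ctdproc import helpers

    x = np.array([10.0, 10.1, 14.0, 10.2, 10.3, 10.4])  # spike at index 2

    # Despike: diffx/prodx are thresholds on first differences and on the
    # product of adjacent differences; the values here are made up.
    xc = helpers.glitchcorrect(x, diffx=1.0, prodx=1.0)

    # Drop values outside [xmin, xmax] and interpolate across the gaps.
    xp = helpers.preen(x, 0.0, 12.0)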
@@ -280,7 +296,7 @@ def mtlb2datetime(matlab_datenum, strip_microseconds=False, strip_seconds=False)
          ]
      elif strip_microseconds:
          tt = [datetime.datetime.replace(tval, microsecond=0) for tval in tt]
-     tt = [np.datetime64(ti) for ti in tt]
+     tt = [np.datetime64(ti, "ns") for ti in tt]
      xi = np.where(nonan)[0]
      for i, ii in enumerate(xi):
          t1[ii] = tt[i]
@@ -290,3 +306,20 @@ def mtlb2datetime(matlab_datenum, strip_microseconds=False, strip_seconds=False)
      t1 = np.array(t1)

      return t1
+
+
+ def datetime2mtlb(dt):
+     pt = pd.to_datetime(dt)
+     dt = pt.to_pydatetime()
+     mdn = dt + datetime.timedelta(days=366)
+     frac_seconds = [
+         (dti - datetime.datetime(dti.year, dti.month, dti.day, 0, 0, 0)).seconds
+         / (24.0 * 60.0 * 60.0)
+         for dti in dt
+     ]
+     frac_microseconds = [
+         dti.microsecond / (24.0 * 60.0 * 60.0 * 1000000.0) for dti in dt
+     ]
+     out = np.array([mdni.toordinal() for mdni in mdn])
+     out = out.astype(float) + frac_seconds + frac_microseconds
+     return out
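
The new datetime2mtlb is the counterpart to the existing mtlb2datetime (note the explicit "ns" cast added above), so MATLAB datenums can now be round-tripped. A short sketch:

    import numpy as np
    from ctdproc import helpers

    t = np.array(["2020-01-01T12:00:00"], dtype="datetime64[ns]")
    dn = helpers.datetime2mtlb(t)   # MATLAB datenum (day count, year 0 epoch)
    t2 = helpers.mtlb2datetime(dn)  # back to datetime64[ns]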