tonik 0.0.2.tar.gz → 0.0.3.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tonik-0.0.3/HOW_TO_RELEASE.md +51 -0
- {tonik-0.0.2 → tonik-0.0.3}/PKG-INFO +13 -9
- tonik-0.0.3/docs/img/storagegroup_output.png +0 -0
- {tonik-0.0.2 → tonik-0.0.3}/docs/user_guide.md +27 -15
- tonik-0.0.3/examples/test_experiment/MDR1/00/HHZ/spectrogram.nc +0 -0
- tonik-0.0.3/examples/test_experiment/MDR2/00/HHZ/spectrogram.nc +0 -0
- {tonik-0.0.2 → tonik-0.0.3}/examples/tonik_example.ipynb +28 -14
- {tonik-0.0.2 → tonik-0.0.3}/pyproject.toml +16 -8
- tonik-0.0.3/src/tonik/__init__.py +23 -0
- tonik-0.0.3/src/tonik/api.py +276 -0
- tonik-0.0.3/src/tonik/package_data/index.html +99 -0
- tonik-0.0.2/src/tonik/lockerroom.py → tonik-0.0.3/src/tonik/storage.py +145 -136
- tonik-0.0.3/src/tonik/utils.py +43 -0
- tonik-0.0.3/tests/conftest.py +90 -0
- tonik-0.0.3/tests/test_api.py +221 -0
- tonik-0.0.3/tests/test_group.py +137 -0
- tonik-0.0.3/tests/test_xarray2hdf5.py +69 -0
- tonik-0.0.2/.pyproject.toml.un~ +0 -0
- tonik-0.0.2/.pytest_cache/.gitignore +0 -2
- tonik-0.0.2/.pytest_cache/CACHEDIR.TAG +0 -4
- tonik-0.0.2/.pytest_cache/README.md +0 -8
- tonik-0.0.2/.pytest_cache/v/cache/lastfailed +0 -12
- tonik-0.0.2/.pytest_cache/v/cache/nodeids +0 -17678
- tonik-0.0.2/.pytest_cache/v/cache/stepwise +0 -1
- tonik-0.0.2/docs/img/lockerroom_output.png +0 -0
- tonik-0.0.2/pyproject.toml~ +0 -32
- tonik-0.0.2/src/tonik/__init__.py +0 -1
- tonik-0.0.2/tests/test_xarray2hdf5.py +0 -100
- {tonik-0.0.2 → tonik-0.0.3}/.devcontainer/devcontainer.json +0 -0
- {tonik-0.0.2 → tonik-0.0.3}/LICENSE +0 -0
- {tonik-0.0.2 → tonik-0.0.3}/README.md +0 -0
- {tonik-0.0.2 → tonik-0.0.3}/docs/img/Thumbs.db +0 -0
- {tonik-0.0.2 → tonik-0.0.3}/docs/img/directory_listing.png +0 -0
- {tonik-0.0.2 → tonik-0.0.3}/docs/img/spectrogram1.png +0 -0
- {tonik-0.0.2 → tonik-0.0.3}/docs/img/spectrogram2.png +0 -0
- {tonik-0.0.2 → tonik-0.0.3}/docs/index.md +0 -0
- {tonik-0.0.2 → tonik-0.0.3}/examples/.nfs00000001a69e0bbe002cc265 +0 -0
- {tonik-0.0.2 → tonik-0.0.3/examples}/tonik.log +0 -0
- {tonik-0.0.2 → tonik-0.0.3}/mkdocs.yml +0 -0
- {tonik-0.0.2 → tonik-0.0.3}/src/tonik/xarray2hdf5.py +0 -0
- /tonik-0.0.2/tests/test_lockerroom.py → /tonik-0.0.3/tonik.log +0 -0
--- /dev/null
+++ tonik-0.0.3/HOW_TO_RELEASE.md
@@ -0,0 +1,51 @@
+# How to issue a new tonik release
+
+## Pypi
+Install the build system:
+```
+python3 -m pip install --upgrade build
+```
+Install `twine`:
+```
+python3 -m pip install --upgrade twine
+```
+Make sure the version number is incremented in the `pyproject.toml` file.
+
+Run the build to create two new files under `dist`. One is the wheel and the other the packaged source code:
+```
+python3 -m build
+```
+Upload to pypi:
+```
+python3 -m twine upload dist/*
+```
+When prompted for the username enter `__token__` and paste in your pypi token as the password.
+
+## Documentation
+Install the [mkdocs](https://www.mkdocs.org/) package and the [mkdocstrings](https://mkdocstrings.github.io/) plugin:
+```
+python3 -m pip install --upgrade mkdocs "mkdocstrings[python]"
+```
+
+To view the documentation locally run:
+```
+mkdocs serve
+```
+To generate documentation run:
+```
+mkdocs build
+```
+from the project root directory. This creates a directory called `site` containing all the necessary files to host the documentation.
+Please don't add `site` to version control. If this is the first time you have built the documentation, run the following:
+
+```
+mv site ../tonik.github.io
+cd ../tonik.github.io
+git init
+git add .
+git commit -m "update documentation"
+git branch -m main
+git remote add origin git@github.com:tsc-tools/tonik.github.io.git
+git push -u --force origin main
+```
+[Github pages](https://pages.github.com/) will then publish the documentation under https://tsc-tools.github.io/tonik.github.io
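One way to sanity-check the release afterwards (a minimal sketch, assuming a fresh `pip install --upgrade tonik` in a clean environment; `importlib.metadata` is standard library):

```python
# Confirm that the freshly installed distribution carries the new version.
from importlib.metadata import version

assert version("tonik") == "0.0.3"
```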
--- tonik-0.0.2/PKG-INFO
+++ tonik-0.0.3/PKG-INFO
@@ -1,6 +1,6 @@
-Metadata-Version: 2.
+Metadata-Version: 2.3
 Name: tonik
-Version: 0.0.2
+Version: 0.0.3
 Summary: A collection of tools to integrate with GNS Science's time series classification platform.
 Project-URL: Homepage, https://tsc-tools.github.io/tonik.github.io
 Project-URL: Issues, https://github.com/tsc-tools/tonik/issues
@@ -10,13 +10,17 @@ Classifier: License :: OSI Approved :: GNU General Public License v3 (GPLv3)
 Classifier: Operating System :: OS Independent
 Classifier: Programming Language :: Python :: 3
 Requires-Python: >=3.7
-Requires-Dist:
-Requires-Dist:
-Requires-Dist:
-Requires-Dist:
-Requires-Dist:
-Requires-Dist:
-Requires-Dist:
+Requires-Dist: datashader>=0.14
+Requires-Dist: fastapi>=0.95
+Requires-Dist: h5netcdf>=1.1
+Requires-Dist: h5py>=3.8
+Requires-Dist: netcdf4>=1.6
+Requires-Dist: pandas>=2.0
+Requires-Dist: python-json-logger>=2.0
+Requires-Dist: uvicorn[standard]>=0.22
+Requires-Dist: xarray>=2023.4
+Provides-Extra: dev
+Requires-Dist: pytest; extra == 'dev'
 Description-Content-Type: text/markdown
 
 # Time series classification tools
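The `Requires-Dist` entries above become queryable at runtime. A minimal sketch, using only the standard library, to list the declared dependencies of an installed copy of tonik:

```python
# Print the requirement strings recorded in the package metadata,
# including the conditional pytest requirement for the 'dev' extra.
from importlib.metadata import requires

print(requires("tonik"))
```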
Binary file: tonik-0.0.3/docs/img/storagegroup_output.png
--- tonik-0.0.2/docs/user_guide.md
+++ tonik-0.0.3/docs/user_guide.md
@@ -6,7 +6,7 @@ import os
 import numpy as np
 import pandas as pd
 import xarray as xr
-from tonik import
+from tonik import StorageGroup
 ```
 
 A locker room consists of many lockers. Each locker stores data for a single sensor, and the locker room groups sensors by, for example, experiment or geographic location. We will start by generating fake spectrogram data.
@@ -26,28 +26,40 @@ xrd['spectrogram'].plot()
 Now we will store the data under two different sites of the same experiment.
 
 ```python
-
-
-
-
-
-
+g = StorageGroup('test_experiment', rootdir='/tmp')
+st1 = g.get_store(site='MDR1', sensor='00', channel='HHZ')
+st2 = g.get_store(site='MDR2', sensor='00', channel='HHZ')
+st1.save(xrd)
+st2.save(xrd)
+g
 ```
 
-
+
 
+Next we want to retrieve the data we just saved. Before retrieving data we have to set the timespan over which we want to retrieve data.
+
+```python
+g.starttime = datetime(2024, 1, 2, 18, 0, 0)
+g.endtime = datetime(2024, 1, 3, 6, 0, 0)
+st = g.get_store('MDR2', '00', 'HHZ')
+st('spectrogram').plot()
 ```
-
+
+
+
+Tonik comes with an API to access the stored data from other applications. To test the API, first start it in a terminal:
+```
+tonik_api --rootdir /tmp
 ```
-
 
-
+Now you can access the same feature through the API:
 
 ```python
-
-
-
-
+url = "http://localhost:8003/feature?"
+url += f"group={g.name}&site=MDR2&sensor=00&channel=HHZ&name=spectrogram"
+url += f"&starttime={g.starttime.isoformat()}&endtime={g.endtime.isoformat()}"
+spec = pd.read_csv(url, parse_dates=True, index_col=[0, 1], date_format='ISO8601').to_xarray()
+spec.transpose('freqs', 'dates')['feature'].plot()
 ```
 
 
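The guide's earlier, unchanged cells build the fake `xrd` dataset that gets saved above. A minimal sketch of such a setup (dimension names and values here are illustrative assumptions; tonik 0.0.3 also ships a `generate_test_data` helper in `tonik.utils`, re-exported in the `__init__.py` diff below):

```python
import numpy as np
import pandas as pd
import xarray as xr

# Fake spectrogram: random power values on a frequency x time grid.
dates = pd.date_range("2024-01-02", "2024-01-04", freq="10min")
freqs = np.linspace(0.0, 25.0, 64)
power = np.abs(np.random.randn(freqs.size, dates.size))
xrd = xr.Dataset(
    {"spectrogram": (("frequency", "datetime"), power)},
    coords={"frequency": freqs, "datetime": dates},
)
```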
Binary file: tonik-0.0.3/examples/test_experiment/MDR1/00/HHZ/spectrogram.nc

Binary file: tonik-0.0.3/examples/test_experiment/MDR2/00/HHZ/spectrogram.nc
--- tonik-0.0.2/examples/tonik_example.ipynb
+++ tonik-0.0.3/examples/tonik_example.ipynb
@@ -18,7 +18,7 @@
 "import numpy as np\n",
 "import pandas as pd\n",
 "import xarray as xr\n",
-"from tonik import
+"from tonik import StorageGroup "
 ]
 },
 {
@@ -56,12 +56,19 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"
-"
-"
-"
-"
-"
+"g = StorageGroup('test_experiment', rootdir='/tmp')\n",
+"st1 = g.get_store(site='MDR1', sensor='00', channel='HHZ')\n",
+"st2 = g.get_store(site='MDR2', sensor='00', channel='HHZ')\n",
+"st1.save(xrd)\n",
+"st2.save(xrd)\n",
+"g"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"Next we want to retrieve the data we just saved. Before retrieving data we have to set the timespan over which we want to retrieve data."
 ]
 },
 {
@@ -70,14 +77,20 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"
+"g.starttime = datetime(2024, 1, 2, 18, 0, 0)\n",
+"g.endtime = datetime(2024, 1, 3, 6, 0, 0)\n",
+"st = g.get_store('MDR2', '00', 'HHZ')\n",
+"st('spectrogram').plot()"
 ]
 },
 {
-"cell_type": "
+"cell_type": "code",
+"execution_count": null,
 "metadata": {},
+"outputs": [],
 "source": [
-"
+"# Start the API server by running the following command in the terminal\n",
+"# tonik_api --rootdir examples"
 ]
 },
 {
@@ -86,10 +99,11 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"
-"
-"
-"
+"url = \"http://localhost:8003/feature?\"\n",
+"url += f\"group={g.name}&site=MDR2&sensor=00&channel=HHZ&name=spectrogram\"\n",
+"url += f\"&starttime={g.starttime.isoformat()}&endtime={g.endtime.isoformat()}\" \n",
+"spec = pd.read_csv(url, parse_dates=True, index_col=[0, 1], date_format='ISO8601').to_xarray()\n",
+"spec.transpose('freqs', 'dates')['feature'].plot()"
 ]
 }
 ],
--- tonik-0.0.2/pyproject.toml
+++ tonik-0.0.3/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "tonik"
-version = "0.0.2"
+version = "0.0.3"
 authors = [
     { name="Yannik Behr", email="y.behr@gns.cri.nz" },
     { name="Christof Mueller", email="c.mueller@gns.cri.nz" }
@@ -19,15 +19,23 @@ classifiers = [
     "Operating System :: OS Independent",
 ]
 dependencies = [
-    "h5py",
-    "
-    "
-    "
-    "
-    "
-    "python-json-logger"
+    "h5py>=3.8",
+    "datashader>=0.14",
+    "xarray>=2023.4",
+    "pandas>=2.0",
+    "netcdf4>=1.6",
+    "h5netcdf>=1.1",
+    "python-json-logger>=2.0",
+    "uvicorn[standard]>=0.22",
+    "fastapi>=0.95"
 ]
 
+[project.optional-dependencies]
+dev = ["pytest"]
+
 [project.urls]
 Homepage = "https://tsc-tools.github.io/tonik.github.io"
 Issues = "https://github.com/tsc-tools/tonik/issues"
+
+[project.scripts]
+tonik_api = "tonik.api:main"
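The new `[project.scripts]` table is what provides the `tonik_api` command used in the user guide: the console script resolves to `tonik.api:main`. A sketch of the equivalent direct call (same argument handling, since `main` passes `argv` straight to `argparse`):

```python
from tonik.api import main

# Equivalent to running `tonik_api --rootdir /tmp` in a shell;
# this starts the uvicorn server and blocks until interrupted.
main(["--rootdir", "/tmp"])
```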
--- /dev/null
+++ tonik-0.0.3/src/tonik/__init__.py
@@ -0,0 +1,23 @@
+import importlib.resources
+from os import PathLike
+from typing import Optional
+
+from .storage import StorageGroup, Path
+from .utils import generate_test_data
+
+
+def get_data(filename: Optional[PathLike] = None) -> str:
+    """Return path to tonik package.
+
+    Parameters
+    ----------
+    filename : PathLike, default None
+        Append `filename` to returned path.
+
+    Returns
+    -------
+    pkgdir_path
+
+    """
+    f = importlib.resources.files(__package__)
+    return str(f) if filename is None else str(f / filename)
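`get_data` resolves paths inside the installed package; `api.py` below relies on it to locate `package_data/index.html`. A short usage sketch:

```python
from tonik import get_data

print(get_data())                           # installed package directory
print(get_data("package_data/index.html"))  # a file shipped with the package
```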
--- /dev/null
+++ tonik-0.0.3/src/tonik/api.py
@@ -0,0 +1,276 @@
+from argparse import ArgumentParser
+from datetime import timedelta, datetime
+import logging
+import os
+
+from cftime import num2date, date2num
+import datashader as dsh
+import numpy as np
+import pandas as pd
+import uvicorn
+from fastapi import FastAPI, HTTPException
+from fastapi.middleware.cors import CORSMiddleware
+from fastapi.responses import HTMLResponse, StreamingResponse
+from pydantic import BaseModel
+from typing import List
+
+from .storage import StorageGroup
+from . import get_data
+
+logger = logging.getLogger(__name__)
+
+
+class TonikAPI:
+
+    def __init__(self, rootdir) -> None:
+        self.rootdir = rootdir
+        self.app = FastAPI()
+
+        # -- allow any origin to query API
+        self.app.add_middleware(CORSMiddleware,
+                                allow_origins=["*"])
+
+        self.app.get("/", response_class=HTMLResponse)(self.root)
+        self.app.get("/feature")(self.feature)
+
+    async def root(self):
+        with open(get_data("package_data/index.html"), "r", encoding="utf-8") as file:
+            html_content = file.read()
+        return HTMLResponse(content=html_content, status_code=200)
+
+
+    def feature(self,
+                name: str='rsam',
+                group: str='Ruapehu',
+                site: str='MAVZ',
+                sensor: str='10',
+                channel: str='HHZ',
+                starttime: datetime=datetime.utcnow()-timedelta(days=30),
+                endtime: datetime=datetime.utcnow(),
+                resolution: str='full',
+                verticalres: int=10,
+                log: bool=True,
+                normalise: bool=False):
+
+        _st = datetime.fromisoformat(str(starttime))
+        _st = _st.replace(tzinfo=None)
+        _et = datetime.fromisoformat(str(endtime))
+        _et = _et.replace(tzinfo=None)
+        g = StorageGroup(group, rootdir=self.rootdir,
+                         starttime=_st, endtime=_et)
+        c = g.get_store(site=site, sensor=sensor, channel=channel)
+        try:
+            feat = c(name)
+        except ValueError as e:
+            msg = f"Feature {name} not found in directory {c.sitedir}:"
+            msg += f"{e}"
+            raise HTTPException(status_code=404, detail=msg)
+        if len(feat.shape) > 1:
+            # assume first dimension is frequency
+            nfreqs = feat.shape[0]
+            dates = feat.coords[feat.dims[1]].values
+            if resolution != 'full':
+                freq, dates, spec = self.aggregate_feature(resolution, verticalres, feat, nfreqs, dates)
+            else:
+                spec = feat.values
+                freq = feat.coords[feat.dims[0]].values
+            vals = spec.ravel(order='C')
+            if log and feat.name != 'sonogram':
+                vals = 10*np.log10(vals)
+            if normalise:
+                vals = (vals - np.nanmin(vals))/(np.nanmax(vals) - np.nanmin(vals))
+            freqs = freq.repeat(dates.size)
+            dates = np.tile(dates, freq.size)
+            df = pd.DataFrame({'dates': dates, 'freqs': freqs, 'feature': vals})
+            output = df.to_csv(index=False,
+                               columns=['dates', 'freqs', 'feature'])
+        else:
+            df = pd.DataFrame(data=feat.to_pandas(), columns=[feat.name])
+            df['dates'] = df.index
+            try:
+                df = df.resample(str(float(resolution)/60000.0)+'T').mean()
+            except ValueError as e:
+                logger.warning(f"Cannot resample {feat.name} to {resolution}: {e}")
+            df.rename(columns={feat.name: 'feature'}, inplace=True)
+            output = df.to_csv(index=False, columns=['dates', 'feature'])
+        return StreamingResponse(iter([output]),
+                                 media_type='text/csv',
+                                 headers={"Content-Disposition":
+                                          "attachment;filename=<VUMT_feature>.csv",
+                                          'Content-Length': str(len(output))})
+
+
+    def aggregate_feature(self, resolution, verticalres, feat, nfreqs, dates):
+        resolution = np.timedelta64(pd.Timedelta(resolution), 'ms').astype(float)
+        ndays = np.timedelta64(dates[-1] - dates[0], 'ms').astype(float)
+        canvas_x = int(ndays/resolution)
+        canvas_y = min(nfreqs, verticalres)
+        dates = date2num(dates.astype('datetime64[us]').astype(datetime),
+                         units='hours since 1970-01-01 00:00:00.0',
+                         calendar='gregorian')
+        feat = feat.assign_coords({'datetime': dates})
+        cvs = dsh.Canvas(plot_width=canvas_x,
+                         plot_height=canvas_y)
+        agg = cvs.raster(source=feat)
+        freq_dim = feat.dims[0]
+        freq, d, spec = agg.coords[freq_dim].values, agg.coords['datetime'].values, agg.data
+        dates = num2date(d, units='hours since 1970-01-01 00:00:00.0', calendar='gregorian')
+        return freq, dates, spec
+
+
+# #pydanticmodel output: Json file
+# class Feature(BaseModel):
+#     name: list
+
+
+# class Channel(BaseModel):
+#     name: str
+#     features: List[Feature] = []
+
+
+# class Location(BaseModel):
+#     name: str
+#     channels: List[Channel] = []
+
+
+# class Station(BaseModel):
+#     name: str
+#     lat: float
+#     lon: float
+#     locations: List[Location] = []
+
+
+# class Group(BaseModel):
+#     volcano: str
+#     stations: List[Station] = []
+
+
+# def get_pydanticModel(group, station, location, channel, feature_list):
+
+#     channels_data = {"name": channel, "features": feature_list}
+#     channel_models = []
+#     channel_model = Channel(**channels_data)
+#     channel_models.append(channel_model)
+
+#     location_data = {"name": location, "channels": channel_models}
+#     location_models = []
+#     location_model = Location(**location_data)
+#     location_models.append(location_model)
+
+#     stations_data = {"name": station, "lat": "42", "lon": "171",
+#                      "locations": location_models}
+#     station_models = []
+#     station_model = Station(**stations_data)
+#     station_models.append(station_model)
+
+#     group_model = Group(group=group, stations=station_models)
+
+#     # Exporting to JSON
+#     json_data = group_model.json()
+#     return json_data
+
+
+# write a function that scans LOCKERROOMROOT for
+# available groups, stations, locations, channels, and features
+# and returns a pydantic model
+# def get_available_features():
+#     groups = os.listdir(ROOT)
+#     group_models = []
+#     for group in groups:
+#         stations = os.listdir(os.path.join(LOCKERROOMROOT, group))
+#         station_models = []
+#         for station in stations:
+#             locations = os.listdir(os.path.join(LOCKERROOMROOT, group, station))
+#             location_models = []
+#             for location in locations:
+#                 channels = os.listdir(os.path.join(LOCKERROOMROOT, group, station, location))
+#                 channel_models = []
+#                 for channel in channels:
+#                     features = os.listdir(os.path.join(LOCKERROOMROOT, group, station, location, channel))
+#                     feature_list = []
+#                     for feature in features:
+#                         feature_list.append(feature)
+#                     channel_data = {"name": channel, "features": feature_list}
+#                     channel_model = Channel(**channel_data)
+#                     channel_models.append(channel_model)
+#                 location_data = {"name": location, "channels": channel_models}
+#                 location_model = Location(**location_data)
+#                 location_models.append(location_model)
+#             station_data = {"name": station, "lat": "42", "lon": "171", "locations": location_models}
+#             station_model = Station(**station_data)
+#             station_models.append(station_model)
+#         group_data = {"volcano": group, "stations": station_models}
+#         group_model = Group(**group_data)
+#         group_models.append(group_model)
+#     return group_models
+
+# @app.get("/featureEndpoint")
+# def featureEndpoint(group: str="all", station: str="all", channel: str="all",
+#                     type: str="all"):
+#     groups = vm.get_available_volcanoes()
+
+#     station_model_list = []
+#     channel_model_list = []
+#     volcano_model_list = []
+#     for _volcano in volcanoes:
+#         streams = vm.get_available_streams(_volcano)
+#         for _stream in streams:
+#             _, _station, _, _channel = _stream.split('.')
+#             stream_dir = os.path.join(FEATUREDIR, _volcano, _station, _channel)
+#             try:
+#                 feature_list = os.listdir(stream_dir)
+#             except (NotADirectoryError, FileNotFoundError):
+#                 continue
+#             feature_list = sorted([str(os.path.basename(path)).split('.nc')[0] for path in feature_list])
+#             channels_data = {"name": _channel, "features":feature_list}
+#             channel_model = Channel(**channels_data)
+#             channel_model_list.append(channel_model)
+#             try:
+#                 site_info = vm.get_site_information(_station)
+#                 lat = site_info['latitude']
+#                 lon = site_info['longitude']
+#             except:
+#                 lat, lon = -999.9, -999.9
+#             stations_data = {"name": _station, "lat": lat, "lon": lon, "channels":channel_model_list}
+#             station_model = Station(**stations_data)
+#             station_model_list.append(station_model)
+
+#         volcano_model = Volcano(volcano=_volcano, stations=station_model_list)
+#         volcano_model_list.append(volcano_model)
+
+#         if len(volcano_model_list) == 0:
+#             return('no volcano')
+
+#     scenario_model = Scenario(scenario='VUMT', volcanoes=volcano_model_list)
+#     if volcano != "all":
+#         # return all stations for a volcano
+#         for _volcano in scenario_model.volcanoes:
+#             if _volcano.volcano == volcano:
+#                 if station == "all":
+#                     return _volcano
+#                 for _station in _volcano.stations:
+#                     if _station.name == station:
+#                         if channel == "all":
+#                             return _station
+#                         for _channel in _station.channels:
+#                             if _channel.name == channel:
+#                                 feature_list_filtered = []
+#                                 for _f in _channel.features:
+#                                     if _f in FeatureRequest.feat_dict[type]:
+#                                         feature_list_filtered.append(_f)
+#                                 _channel.features = feature_list_filtered
+#                                 return _channel
+
+#     return scenario_model
+
+
+
+def main(argv=None):
+    parser = ArgumentParser()
+    parser.add_argument("--rootdir", default='/tmp')
+    args = parser.parse_args(argv)
+    ta = TonikAPI(args.rootdir)
+    uvicorn.run(ta.app, host="0.0.0.0", port=8003)
+
+if __name__ == "__main__":
+    main()
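For 1-D features, the `/feature` endpoint above streams a two-column CSV (`dates`, `feature`); 2-D features gain a `freqs` column, as in the user guide. A minimal sketch of a scalar-feature request (assumes the server from the guide is running on port 8003 and that an `rsam` feature has been stored; both are illustrative):

```python
import pandas as pd

# Build the query string against the running tonik_api instance.
url = ("http://localhost:8003/feature?name=rsam&group=test_experiment"
       "&site=MDR2&sensor=00&channel=HHZ"
       "&starttime=2024-01-02T18:00:00&endtime=2024-01-03T06:00:00")
rsam = pd.read_csv(url, parse_dates=["dates"], index_col="dates")
```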
--- /dev/null
+++ tonik-0.0.3/src/tonik/package_data/index.html
@@ -0,0 +1,99 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+    <meta charset="UTF-8">
+    <meta name="viewport" content="width=device-width, initial-scale=1.0">
+    <title>SAM API Documentation</title>
+    <style>
+        body {
+            font-family: Arial, sans-serif;
+            margin: 0;
+            padding: 0;
+            background-color: #f5f5f5;
+        }
+        header {
+            background-color: #333;
+            color: white;
+            text-align: center;
+            padding: 1em;
+        }
+        section {
+            max-width: 800px;
+            margin: 2em auto;
+            padding: 2em;
+            background-color: white;
+            border-radius: 8px;
+            box-shadow: 0 0 10px rgba(0, 0, 0, 0.1);
+        }
+        h1 {
+            color: #333;
+        }
+        p {
+            line-height: 1.6;
+            color: #666;
+        }
+    </style>
+</head>
+<body>
+
+    <header>
+        <h1>Your API Name</h1>
+        <p>Seismic Acoustic Monitoring Tool (SAM) - API</p>
+    </header>
+
+    <section>
+        <h2>Overview</h2>
+        <p>
+            Welcome to the documentation for the Seismic Acoustic Monitoring API. This API provides access to waveform features and
+            analysis results computed by SAM.
+        </p>
+
+        <!--h2>Authentication</h2>
+        <p>
+        To access the API, you need to authenticate using [authentication method]. Obtain your API key from [location].
+        </p-->
+
+        <h2>Endpoints</h2>
+        <p>
+            The endpoints available in the API are:
+        </p>
+        <ul>
+            <li><strong>GET /feature:</strong> Request waveform features and analysis results.</li>
+            <li><strong>GET /featureEndpoint:</strong> Request meta information on available stations, features and results.</li>
+        </ul>
+
+        <h2>Code Examples</h2>
+        <h3>Requesting meta data</h3>
+        The following will return information on available stations, features and results in JSON format.
+        <pre>
+curl -X GET "http://your.host.server:yourport/featureEndpoint"
+        </pre>
+        To do the same with Python using requests you can use the following code:
+        <pre>
+import requests
+url = "http://your.host.server:yourport/featureEndpoint"
+response = requests.get(url)
+response.json()
+        </pre>
+        <h3>Requesting data</h3>
+        The following example shows how to request RSAM data for station WIZ at volcano Whakaari
+        between 2019-12-01 and 2019-12-31. The return format is CSV.
+        <pre>
+curl -X GET "http://your.host.server:yourport/feature?name=rsam&starttime=2019-12-01T00:00:00&endtime=2019-12-31T00:00:00&volcano=Whakaari&site=WIZ"
+        </pre>
+        To do the same with Python using pandas you can use the following code:
+        <pre>
+import pandas as pd
+feature="rsam"
+starttime="2019-12-01T00:00:00"
+endtime="2019-12-31T00:00:00"
+volcano="Whakaari"
+site="WIZ"
+url = f"http://your.host.server:yourport/feature?name={feature}&starttime={starttime}&endtime={endtime}&volcano={volcano}&site={site}"
+pd.read_csv(url, parse_dates=True, index_col=0)
+        </pre>
+    </section>
+</section>
+
+</body>
+</html>