tonik 0.0.1.tar.gz → 0.0.3.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. tonik-0.0.3/.devcontainer/devcontainer.json +22 -0
  2. tonik-0.0.3/HOW_TO_RELEASE.md +51 -0
  3. {tonik-0.0.1 → tonik-0.0.3}/PKG-INFO +14 -9
  4. tonik-0.0.3/docs/img/Thumbs.db +0 -0
  5. tonik-0.0.3/docs/img/directory_listing.png +0 -0
  6. tonik-0.0.3/docs/img/spectrogram1.png +0 -0
  7. tonik-0.0.3/docs/img/spectrogram2.png +0 -0
  8. tonik-0.0.3/docs/img/storagegroup_output.png +0 -0
  9. tonik-0.0.3/docs/index.md +0 -0
  10. tonik-0.0.3/docs/user_guide.md +65 -0
  11. tonik-0.0.3/examples/.nfs00000001a69e0bbe002cc265 +0 -0
  12. tonik-0.0.3/examples/test_experiment/MDR1/00/HHZ/spectrogram.nc +0 -0
  13. tonik-0.0.3/examples/test_experiment/MDR2/00/HHZ/spectrogram.nc +0 -0
  14. tonik-0.0.3/examples/tonik.log +0 -0
  15. tonik-0.0.3/examples/tonik_example.ipynb +131 -0
  16. tonik-0.0.3/mkdocs.yml +5 -0
  17. {tonik-0.0.1 → tonik-0.0.3}/pyproject.toml +17 -8
  18. tonik-0.0.3/src/tonik/__init__.py +23 -0
  19. tonik-0.0.3/src/tonik/api.py +276 -0
  20. tonik-0.0.3/src/tonik/package_data/index.html +99 -0
  21. tonik-0.0.3/src/tonik/storage.py +299 -0
  22. tonik-0.0.3/src/tonik/utils.py +43 -0
  23. {tonik-0.0.1 → tonik-0.0.3}/src/tonik/xarray2hdf5.py +1 -2
  24. tonik-0.0.3/tests/conftest.py +90 -0
  25. tonik-0.0.3/tests/test_api.py +221 -0
  26. tonik-0.0.3/tests/test_group.py +137 -0
  27. tonik-0.0.3/tests/test_xarray2hdf5.py +69 -0
  28. tonik-0.0.3/tonik.log +0 -0
  29. tonik-0.0.1/.pyproject.toml.un~ +0 -0
  30. tonik-0.0.1/.pytest_cache/.gitignore +0 -2
  31. tonik-0.0.1/.pytest_cache/CACHEDIR.TAG +0 -4
  32. tonik-0.0.1/.pytest_cache/README.md +0 -8
  33. tonik-0.0.1/.pytest_cache/v/cache/lastfailed +0 -9
  34. tonik-0.0.1/.pytest_cache/v/cache/nodeids +0 -17678
  35. tonik-0.0.1/.pytest_cache/v/cache/stepwise +0 -1
  36. tonik-0.0.1/pyproject.toml~ +0 -32
  37. tonik-0.0.1/src/tonik/__init__.py +0 -1
  38. tonik-0.0.1/tests/test_xarray2hdf5.py +0 -100
  39. {tonik-0.0.1 → tonik-0.0.3}/LICENSE +0 -0
  40. {tonik-0.0.1 → tonik-0.0.3}/README.md +0 -0
tonik-0.0.3/.devcontainer/devcontainer.json ADDED
@@ -0,0 +1,22 @@
+ // For format details, see https://aka.ms/devcontainer.json. For config options, see the
+ // README at: https://github.com/devcontainers/templates/tree/main/src/python
+ {
+     "name": "Python 3",
+     // Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile
+     "image": "mcr.microsoft.com/devcontainers/python:1-3.10-bullseye"
+ 
+     // Features to add to the dev container. More info: https://containers.dev/features.
+     // "features": {},
+ 
+     // Use 'forwardPorts' to make a list of ports inside the container available locally.
+     // "forwardPorts": [],
+ 
+     // Use 'postCreateCommand' to run commands after the container is created.
+     // "postCreateCommand": "pip3 install --user -r requirements.txt",
+ 
+     // Configure tool-specific properties.
+     // "customizations": {},
+ 
+     // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
+     // "remoteUser": "root"
+ }
tonik-0.0.3/HOW_TO_RELEASE.md ADDED
@@ -0,0 +1,51 @@
+ # How to issue a new tonik release
+ 
+ ## PyPI
+ Install the build system:
+ ```
+ python3 -m pip install --upgrade build
+ ```
+ Install `twine`:
+ ```
+ python3 -m pip install --upgrade twine
+ ```
+ Make sure the version number is incremented in the `pyproject.toml` file.
+ 
+ Run the build to create two new files under `dist`. One is the wheel and the other is the packaged source code:
+ ```
+ python3 -m build
+ ```
+ Upload to PyPI:
+ ```
+ python3 -m twine upload dist/*
+ ```
+ When prompted for the username, enter `__token__` and paste in your PyPI token as the password.
+ 
+ ## Documentation
+ Install the [mkdocs](https://www.mkdocs.org/) package and the [mkdocstrings](https://mkdocstrings.github.io/) plugin:
+ ```
+ python3 -m pip install --upgrade mkdocs "mkdocstrings[python]"
+ ```
+ 
+ To view the documentation locally run:
+ ```
+ mkdocs serve
+ ```
+ To generate the documentation run:
+ ```
+ mkdocs build
+ ```
+ from the project root directory. This creates a directory called `site` containing all the files needed to host the documentation.
+ Please don't add `site` to version control. If this is the first time you have built the documentation, run the following:
+ 
+ ```
+ mv site ../tonik.github.io
+ cd ../tonik.github.io
+ git init
+ git add .
+ git commit -m "update documentation"
+ git branch -m main
+ git remote add origin git@github.com:tsc-tools/tonik.github.io.git
+ git push -u --force origin main
+ ```
+ [GitHub Pages](https://pages.github.com/) will then publish the documentation under https://tsc-tools.github.io/tonik.github.io
{tonik-0.0.1 → tonik-0.0.3}/PKG-INFO CHANGED
@@ -1,8 +1,8 @@
- Metadata-Version: 2.1
+ Metadata-Version: 2.3
  Name: tonik
- Version: 0.0.1
+ Version: 0.0.3
  Summary: A collection of tools to integrate with GNS Science's time series classification platform.
- Project-URL: Homepage, https://github.com/tsc-tools/tonik
+ Project-URL: Homepage, https://tsc-tools.github.io/tonik.github.io
  Project-URL: Issues, https://github.com/tsc-tools/tonik/issues
  Author-email: Yannik Behr <y.behr@gns.cri.nz>, Christof Mueller <c.mueller@gns.cri.nz>
  License-File: LICENSE
@@ -10,12 +10,17 @@ Classifier: License :: OSI Approved :: GNU General Public License v3 (GPLv3)
  Classifier: Operating System :: OS Independent
  Classifier: Programming Language :: Python :: 3
  Requires-Python: >=3.7
- Requires-Dist: h5netcdf
- Requires-Dist: h5py
- Requires-Dist: netcdf4
- Requires-Dist: pandas
- Requires-Dist: pytest
- Requires-Dist: xarray
+ Requires-Dist: datashader>=0.14
+ Requires-Dist: fastapi>=0.95
+ Requires-Dist: h5netcdf>=1.1
+ Requires-Dist: h5py>=3.8
+ Requires-Dist: netcdf4>=1.6
+ Requires-Dist: pandas>=2.0
+ Requires-Dist: python-json-logger>=2.0
+ Requires-Dist: uvicorn[standard]>=0.22
+ Requires-Dist: xarray>=2023.4
+ Provides-Extra: dev
+ Requires-Dist: pytest; extra == 'dev'
  Description-Content-Type: text/markdown
  
  # Time series classification tools
Binary file
Binary file
Binary file
File without changes
tonik-0.0.3/docs/user_guide.md ADDED
@@ -0,0 +1,65 @@
+ ## Examples of how to store and retrieve data with tonik
+ 
+ ```python
+ from datetime import datetime
+ import os
+ import numpy as np
+ import pandas as pd
+ import xarray as xr
+ from tonik import StorageGroup
+ ```
+ 
+ A locker room consists of many lockers. Each locker stores data for a single sensor, and the locker room groups sensors by, for example, experiment or geographic location. We will start by generating fake spectrogram data.
+ 
+ ```python
+ dates = pd.date_range("2024-01-02", freq='10min', periods=288)
+ data = np.abs(np.cumsum(np.random.normal(0, 8., len(dates))))
+ data = np.tile(data, (10, 1))
+ freqs = np.arange(10)
+ xrd = xr.Dataset({'spectrogram': xr.DataArray(data, coords=[freqs, dates],
+                                               dims=['frequency', 'datetime'])})
+ xrd['spectrogram'].plot()
+ ```
+ 
+ ![1st spectrogram](img/spectrogram1.png)
+ 
+ Now we will store the data under two different sites of the same experiment.
+ 
+ ```python
+ g = StorageGroup('test_experiment', rootdir='/tmp')
+ st1 = g.get_store(site='MDR1', sensor='00', channel='HHZ')
+ st2 = g.get_store(site='MDR2', sensor='00', channel='HHZ')
+ st1.save(xrd)
+ st2.save(xrd)
+ g
+ ```
+ 
+ ![lockerroom output](img/storagegroup_output.png)
+ 
+ Next we want to retrieve the data we just saved. Before retrieving data we have to set the timespan of interest.
+ 
+ ```python
+ g.starttime = datetime(2024, 1, 2, 18, 0, 0)
+ g.endtime = datetime(2024, 1, 3, 6, 0, 0)
+ st = g.get_store('MDR2', '00', 'HHZ')
+ st('spectrogram').plot()
+ ```
+ 
+ ![2nd spectrogram](img/spectrogram2.png)
+ 
+ Tonik comes with an API to access the stored data from other applications. To test the API, first start it in a terminal:
+ ```
+ tonik_api --rootdir /tmp
+ ```
+ 
+ Now you can access the same feature through the API:
+ 
+ ```python
+ url = "http://localhost:8003/feature?"
+ url += f"group={g.name}&site=MDR2&sensor=00&channel=HHZ&name=spectrogram"
+ url += f"&starttime={g.starttime.isoformat()}&endtime={g.endtime.isoformat()}"
+ spec = pd.read_csv(url, parse_dates=True, index_col=[0, 1], date_format='ISO8601').to_xarray()
+ spec.transpose('freqs', 'dates')['feature'].plot()
+ ```
+ 
+ ![2nd spectrogram](img/spectrogram2.png)
File without changes
File without changes
tonik-0.0.3/examples/tonik_example.ipynb ADDED
@@ -0,0 +1,131 @@
+ {
+  "cells": [
+   {
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "## Examples of how to store and retrieve data with tonik"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "from datetime import datetime\n",
+     "import os\n",
+     "import numpy as np\n",
+     "import pandas as pd\n",
+     "import xarray as xr\n",
+     "from tonik import StorageGroup"
+    ]
+   },
+   {
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "A locker room consists of many lockers. Each locker stores data for a single sensor, and the locker room groups sensors by, for example, experiment or geographic location. We will start by generating fake spectrogram data."
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "dates = pd.date_range(\"2024-01-02\", freq='10min', periods=288)\n",
+     "data = np.abs(np.cumsum(np.random.normal(0, 8., len(dates))))\n",
+     "data = np.tile(data, (10, 1))\n",
+     "freqs = np.arange(10)\n",
+     "xrd = xr.Dataset({'spectrogram': xr.DataArray(data, coords=[freqs, dates],\n",
+     "                                              dims=['frequency', 'datetime'])})\n",
+     "fig = xrd['spectrogram'].plot()"
+    ]
+   },
+   {
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "Now we will store the data under two different sites of the same experiment."
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "g = StorageGroup('test_experiment', rootdir='/tmp')\n",
+     "st1 = g.get_store(site='MDR1', sensor='00', channel='HHZ')\n",
+     "st2 = g.get_store(site='MDR2', sensor='00', channel='HHZ')\n",
+     "st1.save(xrd)\n",
+     "st2.save(xrd)\n",
+     "g"
+    ]
+   },
+   {
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "Next we want to retrieve the data we just saved. Before retrieving data we have to set the timespan of interest."
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "g.starttime = datetime(2024, 1, 2, 18, 0, 0)\n",
+     "g.endtime = datetime(2024, 1, 3, 6, 0, 0)\n",
+     "st = g.get_store('MDR2', '00', 'HHZ')\n",
+     "st('spectrogram').plot()"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "# Start the API server by running the following command in the terminal:\n",
+     "# tonik_api --rootdir examples"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "url = \"http://localhost:8003/feature?\"\n",
+     "url += f\"group={g.name}&site=MDR2&sensor=00&channel=HHZ&name=spectrogram\"\n",
+     "url += f\"&starttime={g.starttime.isoformat()}&endtime={g.endtime.isoformat()}\"\n",
+     "spec = pd.read_csv(url, parse_dates=True, index_col=[0, 1], date_format='ISO8601').to_xarray()\n",
+     "spec.transpose('freqs', 'dates')['feature'].plot()"
+    ]
+   }
+  ],
+  "metadata": {
+   "kernelspec": {
+    "display_name": "Python 3",
+    "language": "python",
+    "name": "python3"
+   },
+   "language_info": {
+    "codemirror_mode": {
+     "name": "ipython",
+     "version": 3
+    },
+    "file_extension": ".py",
+    "mimetype": "text/x-python",
+    "name": "python",
+    "nbconvert_exporter": "python",
+    "pygments_lexer": "ipython3",
+    "version": "3.10.14"
+   }
+  },
+  "nbformat": 4,
+  "nbformat_minor": 2
+ }
tonik-0.0.3/mkdocs.yml ADDED
@@ -0,0 +1,5 @@
+ site_name: Tonik
+ nav:
+   - Home: index.md
+   - User Guide: user_guide.md
+ theme: readthedocs
{tonik-0.0.1 → tonik-0.0.3}/pyproject.toml CHANGED
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
  
  [project]
  name = "tonik"
- version = "0.0.1"
+ version = "0.0.3"
  authors = [
      { name="Yannik Behr", email="y.behr@gns.cri.nz" },
      { name="Christof Mueller", email="c.mueller@gns.cri.nz" }
@@ -19,14 +19,23 @@ classifiers = [
      "Operating System :: OS Independent",
  ]
  dependencies = [
-     "h5py",
-     "xarray",
-     "pandas",
-     "netcdf4",
-     "h5netcdf",
-     "pytest"
+     "h5py>=3.8",
+     "datashader>=0.14",
+     "xarray>=2023.4",
+     "pandas>=2.0",
+     "netcdf4>=1.6",
+     "h5netcdf>=1.1",
+     "python-json-logger>=2.0",
+     "uvicorn[standard]>=0.22",
+     "fastapi>=0.95"
  ]
  
+ [project.optional-dependencies]
+ dev = ["pytest"]
+ 
  [project.urls]
- Homepage = "https://github.com/tsc-tools/tonik"
+ Homepage = "https://tsc-tools.github.io/tonik.github.io"
  Issues = "https://github.com/tsc-tools/tonik/issues"
+ 
+ [project.scripts]
+ tonik_api = "tonik.api:main"
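
The new `[project.scripts]` table wires the `tonik_api` console script to `tonik.api:main` (shown further below). As a minimal sketch, assuming tonik is installed, the same entry point can also be driven programmatically, which mirrors running `tonik_api --rootdir /tmp` in a shell:

```python
# Hedged sketch: invoke the console-script entry point directly.
# `main` accepts an argv list, so this is equivalent to the shell
# command `tonik_api --rootdir /tmp`.
from tonik.api import main

main(["--rootdir", "/tmp"])  # blocks while uvicorn serves on port 8003
```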
tonik-0.0.3/src/tonik/__init__.py ADDED
@@ -0,0 +1,23 @@
+ import importlib.resources
+ from os import PathLike
+ from typing import Optional
+ 
+ from .storage import StorageGroup, Path
+ from .utils import generate_test_data
+ 
+ 
+ def get_data(filename: Optional[PathLike] = None) -> str:
+     """Return path to the tonik package directory.
+ 
+     Parameters
+     ----------
+     filename : PathLike, default None
+         Append `filename` to the returned path.
+ 
+     Returns
+     -------
+     pkgdir_path
+ 
+     """
+     f = importlib.resources.files(__package__)
+     return str(f) if filename is None else str(f / filename)
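
`get_data` resolves paths inside the installed package with `importlib.resources`; `api.py` below relies on it to locate the bundled landing page. A minimal sketch of its behaviour, assuming tonik is installed:

```python
# Hedged sketch of get_data() from tonik/__init__.py.
from tonik import get_data

print(get_data())                           # installed package directory
print(get_data("package_data/index.html"))  # file shipped with the package,
                                            # served by the API at "/"
```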
tonik-0.0.3/src/tonik/api.py ADDED
@@ -0,0 +1,276 @@
+ from argparse import ArgumentParser
+ from datetime import timedelta, datetime
+ import logging
+ import os
+ 
+ from cftime import num2date, date2num
+ import datashader as dsh
+ import numpy as np
+ import pandas as pd
+ import uvicorn
+ from fastapi import FastAPI, HTTPException
+ from fastapi.middleware.cors import CORSMiddleware
+ from fastapi.responses import HTMLResponse, StreamingResponse
+ from pydantic import BaseModel
+ from typing import List
+ 
+ from .storage import StorageGroup
+ from . import get_data
+ 
+ logger = logging.getLogger(__name__)
+ 
+ 
+ class TonikAPI:
+ 
+     def __init__(self, rootdir) -> None:
+         self.rootdir = rootdir
+         self.app = FastAPI()
+ 
+         # -- allow any origin to query API
+         self.app.add_middleware(CORSMiddleware,
+                                 allow_origins=["*"])
+ 
+         self.app.get("/", response_class=HTMLResponse)(self.root)
+         self.app.get("/feature")(self.feature)
+ 
+     async def root(self):
+         with open(get_data("package_data/index.html"), "r", encoding="utf-8") as file:
+             html_content = file.read()
+         return HTMLResponse(content=html_content, status_code=200)
+ 
+     def feature(self,
+                 name: str = 'rsam',
+                 group: str = 'Ruapehu',
+                 site: str = 'MAVZ',
+                 sensor: str = '10',
+                 channel: str = 'HHZ',
+                 starttime: datetime = datetime.utcnow() - timedelta(days=30),
+                 endtime: datetime = datetime.utcnow(),
+                 resolution: str = 'full',
+                 verticalres: int = 10,
+                 log: bool = True,
+                 normalise: bool = False):
+ 
+         _st = datetime.fromisoformat(str(starttime))
+         _st = _st.replace(tzinfo=None)
+         _et = datetime.fromisoformat(str(endtime))
+         _et = _et.replace(tzinfo=None)
+         g = StorageGroup(group, rootdir=self.rootdir,
+                          starttime=_st, endtime=_et)
+         c = g.get_store(site=site, sensor=sensor, channel=channel)
+         try:
+             feat = c(name)
+         except ValueError as e:
+             msg = f"Feature {name} not found in directory {c.sitedir}: "
+             msg += f"{e}"
+             raise HTTPException(status_code=404, detail=msg)
+         if len(feat.shape) > 1:
+             # assume first dimension is frequency
+             nfreqs = feat.shape[0]
+             dates = feat.coords[feat.dims[1]].values
+             if resolution != 'full':
+                 freq, dates, spec = self.aggregate_feature(resolution, verticalres, feat, nfreqs, dates)
+             else:
+                 spec = feat.values
+                 freq = feat.coords[feat.dims[0]].values
+             vals = spec.ravel(order='C')
+             if log and feat.name != 'sonogram':
+                 vals = 10*np.log10(vals)
+             if normalise:
+                 vals = (vals - np.nanmin(vals))/(np.nanmax(vals) - np.nanmin(vals))
+             freqs = freq.repeat(dates.size)
+             dates = np.tile(dates, freq.size)
+             df = pd.DataFrame({'dates': dates, 'freqs': freqs, 'feature': vals})
+             output = df.to_csv(index=False,
+                                columns=['dates', 'freqs', 'feature'])
+         else:
+             df = pd.DataFrame(data=feat.to_pandas(), columns=[feat.name])
+             df['dates'] = df.index
+             try:
+                 df = df.resample(str(float(resolution)/60000.0)+'T').mean()
+             except ValueError as e:
+                 logger.warning(f"Cannot resample {feat.name} to {resolution}: {e}")
+             df.rename(columns={feat.name: 'feature'}, inplace=True)
+             output = df.to_csv(index=False, columns=['dates', 'feature'])
+         return StreamingResponse(iter([output]),
+                                  media_type='text/csv',
+                                  headers={"Content-Disposition":
+                                           "attachment;filename=<VUMT_feature>.csv",
+                                           'Content-Length': str(len(output))})
+ 
+     def aggregate_feature(self, resolution, verticalres, feat, nfreqs, dates):
+         resolution = np.timedelta64(pd.Timedelta(resolution), 'ms').astype(float)
+         ndays = np.timedelta64(dates[-1] - dates[0], 'ms').astype(float)
+         canvas_x = int(ndays/resolution)
+         canvas_y = min(nfreqs, verticalres)
+         dates = date2num(dates.astype('datetime64[us]').astype(datetime),
+                          units='hours since 1970-01-01 00:00:00.0',
+                          calendar='gregorian')
+         feat = feat.assign_coords({'datetime': dates})
+         cvs = dsh.Canvas(plot_width=canvas_x,
+                          plot_height=canvas_y)
+         agg = cvs.raster(source=feat)
+         freq_dim = feat.dims[0]
+         freq, d, spec = agg.coords[freq_dim].values, agg.coords['datetime'].values, agg.data
+         dates = num2date(d, units='hours since 1970-01-01 00:00:00.0', calendar='gregorian')
+         return freq, dates, spec
+ 
+ 
+ # # pydantic model output: JSON file
+ # class Feature(BaseModel):
+ #     name: list
+ 
+ 
+ # class Channel(BaseModel):
+ #     name: str
+ #     features: List[Feature] = []
+ 
+ 
+ # class Location(BaseModel):
+ #     name: str
+ #     channels: List[Channel] = []
+ 
+ 
+ # class Station(BaseModel):
+ #     name: str
+ #     lat: float
+ #     lon: float
+ #     locations: List[Location] = []
+ 
+ 
+ # class Group(BaseModel):
+ #     volcano: str
+ #     stations: List[Station] = []
+ 
+ 
+ # def get_pydanticModel(group, station, location, channel, feature_list):
+ 
+ #     channels_data = {"name": channel, "features": feature_list}
+ #     channel_models = []
+ #     channel_model = Channel(**channels_data)
+ #     channel_models.append(channel_model)
+ 
+ #     location_data = {"name": location, "channels": channel_models}
+ #     location_models = []
+ #     location_model = Location(**location_data)
+ #     location_models.append(location_model)
+ 
+ #     stations_data = {"name": station, "lat": "42", "lon": "171",
+ #                      "locations": location_models}
+ #     station_models = []
+ #     station_model = Station(**stations_data)
+ #     station_models.append(station_model)
+ 
+ #     group_model = Group(group=group, stations=station_models)
+ 
+ #     # Exporting to JSON
+ #     json_data = group_model.json()
+ #     return json_data
+ 
+ 
+ # write a function that scans LOCKERROOMROOT for
+ # available groups, stations, locations, channels, and features
+ # and returns a pydantic model
+ # def get_available_features():
+ #     groups = os.listdir(ROOT)
+ #     group_models = []
+ #     for group in groups:
+ #         stations = os.listdir(os.path.join(LOCKERROOMROOT, group))
+ #         station_models = []
+ #         for station in stations:
+ #             locations = os.listdir(os.path.join(LOCKERROOMROOT, group, station))
+ #             location_models = []
+ #             for location in locations:
+ #                 channels = os.listdir(os.path.join(LOCKERROOMROOT, group, station, location))
+ #                 channel_models = []
+ #                 for channel in channels:
+ #                     features = os.listdir(os.path.join(LOCKERROOMROOT, group, station, location, channel))
+ #                     feature_list = []
+ #                     for feature in features:
+ #                         feature_list.append(feature)
+ #                     channel_data = {"name": channel, "features": feature_list}
+ #                     channel_model = Channel(**channel_data)
+ #                     channel_models.append(channel_model)
+ #                 location_data = {"name": location, "channels": channel_models}
+ #                 location_model = Location(**location_data)
+ #                 location_models.append(location_model)
+ #             station_data = {"name": station, "lat": "42", "lon": "171", "locations": location_models}
+ #             station_model = Station(**station_data)
+ #             station_models.append(station_model)
+ #         group_data = {"volcano": group, "stations": station_models}
+ #         group_model = Group(**group_data)
+ #         group_models.append(group_model)
+ #     return group_models
+ 
+ # @app.get("/featureEndpoint")
+ # def featureEndpoint(group: str="all", station: str="all", channel: str="all",
+ #                     type: str="all"):
+ #     groups = vm.get_available_volcanoes()
+ 
+ #     station_model_list = []
+ #     channel_model_list = []
+ #     volcano_model_list = []
+ #     for _volcano in volcanoes:
+ #         streams = vm.get_available_streams(_volcano)
+ #         for _stream in streams:
+ #             _, _station, _, _channel = _stream.split('.')
+ #             stream_dir = os.path.join(FEATUREDIR, _volcano, _station, _channel)
+ #             try:
+ #                 feature_list = os.listdir(stream_dir)
+ #             except (NotADirectoryError, FileNotFoundError):
+ #                 continue
+ #             feature_list = sorted([str(os.path.basename(path)).split('.nc')[0] for path in feature_list])
+ #             channels_data = {"name": _channel, "features": feature_list}
+ #             channel_model = Channel(**channels_data)
+ #             channel_model_list.append(channel_model)
+ #             try:
+ #                 site_info = vm.get_site_information(_station)
+ #                 lat = site_info['latitude']
+ #                 lon = site_info['longitude']
+ #             except:
+ #                 lat, lon = -999.9, -999.9
+ #             stations_data = {"name": _station, "lat": lat, "lon": lon, "channels": channel_model_list}
+ #             station_model = Station(**stations_data)
+ #             station_model_list.append(station_model)
+ 
+ #         volcano_model = Volcano(volcano=_volcano, stations=station_model_list)
+ #         volcano_model_list.append(volcano_model)
+ 
+ #     if len(volcano_model_list) == 0:
+ #         return('no volcano')
+ 
+ #     scenario_model = Scenario(scenario='VUMT', volcanoes=volcano_model_list)
+ #     if volcano != "all":
+ #         # return all stations for a volcano
+ #         for _volcano in scenario_model.volcanoes:
+ #             if _volcano.volcano == volcano:
+ #                 if station == "all":
+ #                     return _volcano
+ #                 for _station in _volcano.stations:
+ #                     if _station.name == station:
+ #                         if channel == "all":
+ #                             return _station
+ #                         for _channel in _station.channels:
+ #                             if _channel.name == channel:
+ #                                 feature_list_filtered = []
+ #                                 for _f in _channel.features:
+ #                                     if _f in FeatureRequest.feat_dict[type]:
+ #                                         feature_list_filtered.append(_f)
+ #                                 _channel.features = feature_list_filtered
+ #                                 return _channel
+ 
+ #     return scenario_model
+ 
+ 
+ def main(argv=None):
+     parser = ArgumentParser()
+     parser.add_argument("--rootdir", default='/tmp')
+     args = parser.parse_args(argv)
+     ta = TonikAPI(args.rootdir)
+     uvicorn.run(ta.app, host="0.0.0.0", port=8003)
+ 
+ 
+ if __name__ == "__main__":
+     main()
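
The `/feature` endpoint streams CSV: `dates,feature` columns for 1-D features and `dates,freqs,feature` for 2-D features, with optional datashader-based downsampling (`resolution`, `verticalres`) and `log`/`normalise` scaling. A hedged client sketch, assuming the API is running locally (for example via `tonik_api --rootdir /tmp`) and that a 2-D `spectrogram` feature was stored as in the user guide; the parameter values are illustrative:

```python
import pandas as pd

# Hypothetical query against a locally running tonik API.
url = (
    "http://localhost:8003/feature?"
    "group=test_experiment&site=MDR2&sensor=00&channel=HHZ"
    "&name=spectrogram"
    "&starttime=2024-01-02T18:00:00&endtime=2024-01-03T06:00:00"
    "&resolution=10min"   # any pandas Timedelta string; 'full' skips aggregation
    "&verticalres=10"     # cap on the number of frequency bins
    "&log=false"          # skip the 10*log10(value) scaling
)
df = pd.read_csv(url, parse_dates=["dates"])
print(df.head())
```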