hydroserverpy 1.2.1__py3-none-any.whl → 1.3.0b2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of hydroserverpy might be problematic.
- hydroserverpy/__init__.py +1 -1
- hydroserverpy/api/{main.py → client.py} +52 -22
- hydroserverpy/api/models/__init__.py +1 -2
- hydroserverpy/api/models/base.py +180 -47
- hydroserverpy/api/models/etl/data_archive.py +31 -59
- hydroserverpy/api/models/etl/data_source.py +34 -76
- hydroserverpy/api/models/etl/orchestration_system.py +23 -38
- hydroserverpy/api/models/iam/apikey.py +57 -38
- hydroserverpy/api/models/iam/collaborator.py +55 -19
- hydroserverpy/api/models/iam/role.py +32 -4
- hydroserverpy/api/models/iam/workspace.py +58 -86
- hydroserverpy/api/models/sta/datastream.py +122 -214
- hydroserverpy/api/models/sta/observation.py +101 -0
- hydroserverpy/api/models/sta/observed_property.py +18 -53
- hydroserverpy/api/models/sta/processing_level.py +16 -31
- hydroserverpy/api/models/sta/result_qualifier.py +16 -31
- hydroserverpy/api/models/sta/sensor.py +27 -88
- hydroserverpy/api/models/sta/thing.py +48 -152
- hydroserverpy/api/models/sta/unit.py +16 -29
- hydroserverpy/api/services/__init__.py +1 -0
- hydroserverpy/api/services/base.py +92 -76
- hydroserverpy/api/services/etl/data_archive.py +42 -72
- hydroserverpy/api/services/etl/data_source.py +42 -72
- hydroserverpy/api/services/etl/orchestration_system.py +25 -33
- hydroserverpy/api/services/iam/role.py +38 -0
- hydroserverpy/api/services/iam/workspace.py +96 -99
- hydroserverpy/api/services/sta/datastream.py +151 -210
- hydroserverpy/api/services/sta/observed_property.py +31 -49
- hydroserverpy/api/services/sta/processing_level.py +30 -36
- hydroserverpy/api/services/sta/result_qualifier.py +24 -34
- hydroserverpy/api/services/sta/sensor.py +34 -48
- hydroserverpy/api/services/sta/thing.py +96 -89
- hydroserverpy/api/services/sta/unit.py +30 -34
- hydroserverpy/api/utils.py +22 -0
- hydroserverpy/etl/extractors/base.py +2 -4
- hydroserverpy/etl/loaders/hydroserver_loader.py +1 -0
- hydroserverpy/etl/timestamp_parser.py +82 -48
- hydroserverpy/etl/transformers/base.py +5 -9
- hydroserverpy/etl_csv/hydroserver_etl_csv.py +1 -1
- {hydroserverpy-1.2.1.dist-info → hydroserverpy-1.3.0b2.dist-info}/METADATA +1 -1
- hydroserverpy-1.3.0b2.dist-info/RECORD +70 -0
- hydroserverpy/api/http.py +0 -22
- hydroserverpy-1.2.1.dist-info/RECORD +0 -68
- {hydroserverpy-1.2.1.dist-info → hydroserverpy-1.3.0b2.dist-info}/WHEEL +0 -0
- {hydroserverpy-1.2.1.dist-info → hydroserverpy-1.3.0b2.dist-info}/licenses/LICENSE +0 -0
- {hydroserverpy-1.2.1.dist-info → hydroserverpy-1.3.0b2.dist-info}/top_level.txt +0 -0
- {hydroserverpy-1.2.1.dist-info → hydroserverpy-1.3.0b2.dist-info}/zip-safe +0 -0
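Note that 1.3.0b2 is a beta pre-release, so a plain pip install hydroserverpy will not select it; testing it requires pinning the version (pip install hydroserverpy==1.3.0b2) or opting in to pre-releases (pip install --pre hydroserverpy). Note also the rename of hydroserverpy/api/main.py to client.py: any code that imported directly from hydroserverpy.api.main will need updating.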
hydroserverpy/api/models/sta/datastream.py

@@ -1,9 +1,11 @@
-…
-…
-from …
+import uuid
+import pandas as pd
+from typing import List, Union, Optional, Literal, ClassVar, TYPE_CHECKING
+from pydantic import Field
 from uuid import UUID
 from datetime import datetime
-from …
+from hydroserverpy.api.utils import normalize_uuid
+from ..base import HydroServerBaseModel
 
 if TYPE_CHECKING:
     from hydroserverpy import HydroServer
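The import changes above preview the theme of this diff: the 1.2.1 models pulled values out of a nested "properties" payload using pydantic's AliasChoices/AliasPath, while 1.3.0b2 reads flat fields and resolves related objects through normalize_uuid and a shared HydroServerBaseModel. For context, a minimal standalone sketch of the alias pattern being removed (generic pydantic v2, not hydroserverpy code):

from typing import Optional

from pydantic import BaseModel, Field, AliasChoices, AliasPath


class Example(BaseModel):
    # Accept either a flat "status" key or one nested under "properties".
    status: Optional[str] = Field(
        None,
        validation_alias=AliasChoices("status", AliasPath("properties", "status")),
    )


assert Example.model_validate({"status": "ongoing"}).status == "ongoing"
assert Example.model_validate({"properties": {"status": "ongoing"}}).status == "ongoing"

Dropping this machinery is what lets the new Datastream model in the next hunk declare each field once, with no per-field alias plumbing.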
@@ -14,132 +16,51 @@ if TYPE_CHECKING:
     ObservedProperty,
     Unit,
     ProcessingLevel,
+    DataSource,
+    DataArchive
 )
 
 
-class …
+class Datastream(HydroServerBaseModel):
     name: str = Field(..., max_length=255)
     description: str
     observation_type: str = Field(..., max_length=255)
-    sampled_medium: str = Field(
-        …
-    )
-    …
-        max_length=255,
-        validation_alias=AliasChoices(
-            "aggregationStatistic", AliasPath("properties", "aggregationStatistic")
-        ),
-    )
-    time_aggregation_interval: float = Field(
-        ...,
-        validation_alias=AliasChoices(
-            "timeAggregationInterval",
-            AliasPath("properties", "timeAggregationInterval"),
-        ),
-    )
-    status: Optional[str] = Field(
-        None,
-        max_length=255,
-        validation_alias=AliasChoices("status", AliasPath("properties", "status")),
-    )
-    result_type: str = Field(
-        ...,
-        max_length=255,
-        validation_alias=AliasChoices(
-            "resultType", AliasPath("properties", "resultType")
-        ),
-    )
-    value_count: Optional[int] = Field(
-        None,
-        ge=0,
-        validation_alias=AliasChoices(
-            "valueCount", AliasPath("properties", "valueCount")
-        ),
-    )
-    phenomenon_begin_time: Optional[datetime] = Field(
-        None, validation_alias=AliasChoices("phenomenonBeginTime", "phenomenonTime")
-    )
-    phenomenon_end_time: Optional[datetime] = Field(
-        None, validation_alias=AliasChoices("phenomenonEndTime", "phenomenonTime")
-    )
-    result_begin_time: Optional[datetime] = Field(
-        None, validation_alias=AliasChoices("resultBeginTime", "resultTime")
-    )
-    result_end_time: Optional[datetime] = Field(
-        None, validation_alias=AliasChoices("resultEndTime", "resultTime")
-    )
-    is_private: bool = Field(
-        False,
-        validation_alias=AliasChoices(
-            "isPrivate", AliasPath("properties", "isPrivate")
-        ),
-    )
-    is_visible: bool = Field(
-        True,
-        validation_alias=AliasChoices(
-            "isVisible", AliasPath("properties", "isVisible")
-        ),
-    )
-    time_aggregation_interval_unit: Literal["seconds", "minutes", "hours", "days"] = (
-        Field(
-            ...,
-            validation_alias=AliasChoices(
-                "timeAggregationIntervalUnit",
-                AliasPath("properties", "timeAggregationIntervalUnitOfMeasurement"),
-            ),
-        )
-    )
-    intended_time_spacing: Optional[float] = Field(
-        None,
-        validation_alias=AliasChoices(
-            "intendedTimeSpacing", AliasPath("properties", "intendedTimeSpacing")
-        ),
-    )
+    sampled_medium: str = Field(..., max_length=255)
+    no_data_value: float
+    aggregation_statistic: str = Field(..., max_length=255)
+    time_aggregation_interval: float
+    status: Optional[str] = Field(None, max_length=255)
+    result_type: str = Field(..., max_length=255)
+    value_count: Optional[int] = Field(None, ge=0)
+    phenomenon_begin_time: Optional[datetime] = None
+    phenomenon_end_time: Optional[datetime] = None
+    result_begin_time: Optional[datetime] = None
+    result_end_time: Optional[datetime] = None
+    is_private: bool = False
+    is_visible: bool = True
+    time_aggregation_interval_unit: Literal["seconds", "minutes", "hours", "days"]
+    intended_time_spacing: Optional[float] = None
     intended_time_spacing_unit: Optional[
         Literal["seconds", "minutes", "hours", "days"]
-    ] = …
-    …
-        "…
-        "…
-        "result_end_time",
-        …
-        return value
-
-
-class Datastream(HydroServerModel, DatastreamFields):
-    def __init__(
-        self,
-        _connection: "HydroServer",
-        _uid: Union[UUID, str],
-        **data,
-    ):
-        super().__init__(
-            _connection=_connection, _model_ref="datastreams", _uid=_uid, **data
-        )
+    ] = None
+    data_source_id: Optional[uuid.UUID] = None
+    thing_id: uuid.UUID
+    workspace_id: uuid.UUID
+    sensor_id: uuid.UUID
+    observed_property_id: uuid.UUID
+    processing_level_id: uuid.UUID
+    unit_id: uuid.UUID
+
+    _editable_fields: ClassVar[set[str]] = {
+        "name", "description", "observation_type", "sampled_medium", "no_data_value", "aggregation_statistic",
+        "time_aggregation_interval", "status", "result_type", "value_count", "phenomenon_begin_time",
+        "phenomenon_end_time", "result_begin_time", "result_end_time", "is_private", "is_visible",
+        "time_aggregation_interval_unit", "intended_time_spacing", "intended_time_spacing_unit", "thing_id",
+        "sensor_id", "observed_property_id", "processing_level_id", "unit_id"
+    }
+
+    def __init__(self, client: "HydroServer", **data):
+        super().__init__(client=client, service=client.datastreams, **data)
 
         self._workspace = None
         self._thing = None
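The new model declares _editable_fields as a ClassVar rather than deriving editability from a separate DatastreamFields mixin. How HydroServerBaseModel consumes the set is not part of this hunk; a plausible sketch, assuming it whitelists attributes when building an update payload:

from typing import Any, ClassVar

from pydantic import BaseModel


class EditableModel(BaseModel):
    # Hypothetical stand-in for HydroServerBaseModel, whose internals are
    # not shown in this diff.
    _editable_fields: ClassVar[set[str]] = set()

    def editable_payload(self) -> dict[str, Any]:
        # Dump the model, keeping only fields declared editable.
        return {
            key: value
            for key, value in self.model_dump().items()
            if key in self._editable_fields
        }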
@@ -147,14 +68,19 @@ class Datastream(HydroServerModel, DatastreamFields):
         self._unit = None
         self._processing_level = None
         self._sensor = None
+        self._data_source = None
+        self._data_archives = None
+
+    @classmethod
+    def get_route(cls):
+        return "datastreams"
 
     @property
     def workspace(self) -> "Workspace":
         """The workspace this datastream belongs to."""
 
         if self._workspace is None:
-            …
-            self._workspace = self._connection.workspaces.get(uid=datastream["workspaceId"])
+            self._workspace = self.client.workspaces.get(uid=self.workspace_id)
 
         return self._workspace
 
@@ -163,179 +89,161 @@ class Datastream(HydroServerModel, DatastreamFields):
         """The thing this datastream belongs to."""
 
         if self._thing is None:
-            self._thing = self.…
-                uid=self.uid,
-                fetch_by_datastream_uid=True,
-            )
-            self._original_data["thing"] = self._thing
+            self._thing = self.client.things.get(uid=self.thing_id)
 
         return self._thing
 
     @thing.setter
-    def thing(self, thing: Union["Thing", UUID, str]):
+    def thing(self, thing: Union["Thing", UUID, str] = ...):
         if not thing:
             raise ValueError("Thing of datastream cannot be None.")
-        if …
-            self.…
-            …
-        )
+        if normalize_uuid(thing) != str(self.thing_id):
+            self.thing_id = normalize_uuid(thing)
+            self._thing = None
 
     @property
     def sensor(self) -> "Sensor":
-        """The sensor this datastream …"""
+        """The sensor of this datastream."""
 
         if self._sensor is None:
-            self._sensor = self.…
-                uid=self.uid,
-                fetch_by_datastream_uid=True,
-            )
-            self._original_data["sensor"] = self._sensor
+            self._sensor = self.client.sensors.get(uid=self.sensor_id)
 
         return self._sensor
 
     @sensor.setter
-    def sensor(self, sensor: Union["Sensor", UUID, str]):
+    def sensor(self, sensor: Union["Sensor", UUID, str] = ...):
         if not sensor:
             raise ValueError("Sensor of datastream cannot be None.")
-        if …
-            self.…
-            …
-        )
+        if normalize_uuid(sensor) != str(self.sensor_id):
+            self.sensor_id = normalize_uuid(sensor)
+            self._sensor = None
 
     @property
-    def observed_property(self) -> "…
+    def observed_property(self) -> "ObservedProperty":
         """The observed property of this datastream."""
 
         if self._observed_property is None:
-            self._observed_property = self.…
-                uid=self.uid,
-                fetch_by_datastream_uid=True,
-            )
-            self._original_data["observed_property"] = self._observed_property
+            self._observed_property = self.client.observedproperties.get(uid=self.observed_property_id)
 
         return self._observed_property
 
     @observed_property.setter
-    def observed_property(
-        self, observed_property: Union["ObservedProperty", UUID, str]
-    ):
+    def observed_property(self, observed_property: Union["ObservedProperty", UUID, str] = ...):
         if not observed_property:
             raise ValueError("Observed property of datastream cannot be None.")
-        if …
-            self.observed_property…
-            …
-            self._observed_property = self._connection.observedproperties.get(
-                uid=str(getattr(observed_property, "uid", observed_property))
-            )
+        if normalize_uuid(observed_property) != str(self.observed_property_id):
+            self.observed_property_id = normalize_uuid(observed_property)
+            self._observed_property = None
 
     @property
     def unit(self) -> "Unit":
-        """The unit this datastream …"""
+        """The unit of this datastream."""
 
         if self._unit is None:
-            …
-            self._unit = self._connection.units.get(uid=datastream["unitId"])
-            self._original_data["unit"] = self._unit
+            self._unit = self.client.units.get(uid=self.unit_id)
 
         return self._unit
 
     @unit.setter
-    def unit(self, unit: Union["Unit", UUID, str]):
+    def unit(self, unit: Union["Unit", UUID, str] = ...):
         if not unit:
             raise ValueError("Unit of datastream cannot be None.")
-        if …
-            self.…
+        if normalize_uuid(unit) != str(self.unit_id):
+            self.unit_id = normalize_uuid(unit)
+            self._unit = None
 
     @property
-    def processing_level(self) -> "…
+    def processing_level(self) -> "ProcessingLevel":
         """The processing level of this datastream."""
 
         if self._processing_level is None:
-            …
-            self._processing_level = self._connection.processinglevels.get(uid=datastream["processingLevelId"])
-            self._original_data["processing_level"] = self._processing_level
+            self._processing_level = self.client.processinglevels.get(uid=self.processing_level_id)
 
         return self._processing_level
 
     @processing_level.setter
-    def processing_level(self, processing_level: Union["ProcessingLevel", UUID, str]):
+    def processing_level(self, processing_level: Union["ProcessingLevel", UUID, str] = ...):
         if not processing_level:
             raise ValueError("Processing level of datastream cannot be None.")
-        if …
-            self.processing_level…
-            …
-            self._processing_level = self._connection.processinglevels.get(
-                uid=str(getattr(processing_level, "uid", processing_level))
-            )
+        if normalize_uuid(processing_level) != str(self.processing_level_id):
+            self.processing_level_id = normalize_uuid(processing_level)
+            self._processing_level = None
 
-    …
-    …
+    @property
+    def data_source(self) -> Optional["DataSource"]:
+        """The data source of this datastream."""
 
-        self.…
-        …
-        self._observed_property = None
-        self._unit = None
-        self._processing_level = None
-        self._sensor = None
-        super()._refresh()
+        if self._data_source is None and self.data_source_id is not None:
+            self._data_source = self.client.datasources.get(uid=self.data_source_id)
 
-    …
-        """Save changes to this datastream to HydroServer."""
+        return self._data_source
 
-    …
+    @property
+    def data_archives(self) -> List["DataArchive"]:
+        """The data archives of this datastream."""
 
-    …
-    …
+        if self._data_archives is None:
+            self._data_archives = self.client.dataarchives.list(datastream=self.uid, fetch_all=True).items
 
-    …
+        return self._data_archives
 
     def get_observations(
         self,
-        …
-        end_time: datetime = None,
-        page: int = 1,
+        page: int = ...,
         page_size: int = 100000,
-        …
+        order_by: List[str] = ...,
+        phenomenon_time_max: datetime = ...,
+        phenomenon_time_min: datetime = ...,
         fetch_all: bool = False,
-    ) -> DataFrame:
+    ) -> pd.DataFrame:
         """Retrieve the observations for this datastream."""
 
-        return self.…
+        return self.client.datastreams.get_observations(
             uid=self.uid,
-            start_time=start_time,
-            end_time=end_time,
             page=page,
             page_size=page_size,
-            …
-            …
+            order_by=order_by,
+            phenomenon_time_max=phenomenon_time_max,
+            phenomenon_time_min=phenomenon_time_min,
+            fetch_all=fetch_all
         )
 
     def load_observations(
         self,
-        observations: DataFrame,
+        observations: pd.DataFrame,
     ) -> None:
         """Load a DataFrame of observations to the datastream."""
 
-        return self.…
+        return self.client.datastreams.load_observations(
             uid=self.uid,
             observations=observations,
         )
 
+    def delete_observations(
+        self,
+        phenomenon_time_start: Optional[datetime] = None,
+        phenomenon_time_end: Optional[datetime] = None,
+    ):
+        """Delete the observations for this datastream."""
+
+        return self.client.datastreams.delete_observations(
+            uid=self.uid,
+            phenomenon_time_start=phenomenon_time_start,
+            phenomenon_time_end=phenomenon_time_end,
+        )
+
     # TODO: Find a better long-term solution for this issue.
     def sync_phenomenon_end_time(self):
         """Ensures the phenomenon_end_time field matches the actual end time of the observations."""
 
-        …
-        …
-            params={
-                …
-                "page": 1,
-                "page_size": 1
-            }
+        path = f"/{self.client.base_route}/{self.get_route()}/{str(self.uid)}/observations"
+        response = self.client.request(
+            "get", path, params={"page_size": 1, "order_by": "-phenomenonTime"}
+            …
         ).json()
 
-        if len(response…
-            self.phenomenon_end_time = datetime.fromisoformat(response["…
+        if len(response) > 0:
+            self.phenomenon_end_time = datetime.fromisoformat(response[0]["phenomenonTime"])
         else:
             self.phenomenon_end_time = None
 
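In sum, Datastream now resolves every related object lazily from its *_id field through the shared client, and gains delete_observations plus data_source/data_archives accessors. A usage sketch, assuming a connected client (the constructor arguments and uid below are placeholders, not documented API):

from hydroserverpy import HydroServer

# Placeholder connection details; the real constructor signature may differ.
client = HydroServer("https://hydroserver.example.com")

datastream = client.datastreams.get(uid="00000000-0000-0000-0000-000000000000")

# Related objects are fetched on first access and cached until the id changes.
unit = datastream.unit
archives = datastream.data_archives

# Paged observation retrieval. The annotation says pd.DataFrame, but the
# ObservationCollection in the next file paginates through this same method,
# so the effective return appears to be a collection wrapping a DataFrame.
obs = datastream.get_observations(page=1, page_size=1000)

# New in 1.3.0b2: bulk deletion, optionally bounded by phenomenon time.
datastream.delete_observations(phenomenon_time_start=None, phenomenon_time_end=None)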
hydroserverpy/api/models/sta/observation.py (new file)

@@ -0,0 +1,101 @@
+import pandas as pd
+from typing import Optional, Any, List, TYPE_CHECKING
+from dataclasses import dataclass
+from requests import Response
+from pydantic.alias_generators import to_snake
+
+if TYPE_CHECKING:
+    from hydroserverpy.api.models import Datastream
+
+
+@dataclass
+class ObservationCollection:
+    dataframe: pd.DataFrame
+    filters: Optional[dict[str, Any]] = None
+    order_by: Optional[List[str]] = None
+    page: Optional[int] = None
+    page_size: Optional[int] = None
+    total_pages: Optional[int] = None
+    total_count: Optional[int] = None
+
+    def __init__(
+        self,
+        datastream: "Datastream",
+        response: Optional[Response] = None,
+        **data
+    ):
+        self.filters = data.get("filters")
+        self.order_by = data.get("order_by")
+        self.page = data.get("page") or (int(response.headers.get("X-Page")) if response else None)
+        self.page_size = data.get("page_size") or (int(response.headers.get("X-Page-Size")) if response else None)
+        self.total_pages = data.get("total_pages") or (int(response.headers.get("X-Total-Pages")) if response else None)
+        self.total_count = data.get("total_count") or (int(response.headers.get("X-Total-Count")) if response else None)
+        self.datastream = datastream
+
+        if "dataframe" in data:
+            self.dataframe = data["dataframe"]
+        elif response is not None:
+            data = response.json()
+            self.dataframe = pd.DataFrame({to_snake(k): v for k, v in data.items()})
+            if "phenomenon_time" in self.dataframe.columns:
+                self.dataframe["phenomenon_time"] = pd.to_datetime(self.dataframe["phenomenon_time"], utc=True)
+        else:
+            self.dataframe = pd.DataFrame()
+
+    def next_page(self):
+        """Fetches the next page of data from HydroServer."""
+
+        return self.datastream.get_observations(
+            **(self.filters or {}),
+            page=(self.page or 0) + 1,
+            page_size=self.page_size or 100000,
+            order_by=self.order_by or ...,
+        )
+
+    def previous_page(self):
+        """Fetches the previous page of data from HydroServer."""
+
+        if not self.page or self.page <= 1:
+            return None
+
+        return self.datastream.get_observations(
+            **(self.filters or {}),
+            page=self.page - 1,
+            page_size=self.page_size or 100000,
+            order_by=self.order_by or ...,
+        )
+
+    def fetch_all(self) -> "ObservationCollection":
+        """Fetches all pages of data from HydroServer for this collection."""
+
+        all_dataframes = []
+        page_num = 1
+
+        while self.total_pages is None or page_num <= self.total_pages:
+            if page_num == self.page:
+                all_dataframes.append(self.dataframe)
+            else:
+                observations = self.datastream.get_observations(
+                    **(self.filters or {}),
+                    page=page_num,
+                    page_size=self.page_size or 100000,
+                    order_by=self.order_by or ...,
+                )
+                if observations.dataframe.empty:
+                    break
+                all_dataframes.append(observations.dataframe)
+
+            page_num += 1
+
+        merged_dataframe = pd.concat(all_dataframes, ignore_index=True)
+
+        return self.__class__(
+            dataframe=merged_dataframe,
+            datastream=self.datastream,
+            filters=self.filters,
+            order_by=self.order_by or ...,
+            page=1,
+            page_size=len(merged_dataframe),
+            total_pages=1,
+            total_count=len(merged_dataframe)
+        )
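ObservationCollection wraps a DataFrame plus the paging metadata HydroServer returns in the X-Page and X-Total-* response headers, and re-issues datastream.get_observations to move between pages. A short sketch of the intended flow, continuing the placeholder client from the earlier example:

collection = datastream.get_observations(page=1, page_size=1000)

print(collection.page, collection.total_pages, collection.total_count)

next_collection = collection.next_page()  # page 2, same filters and ordering
merged = collection.fetch_all()           # every page concatenated into one
print(len(merged.dataframe), merged.total_count)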
hydroserverpy/api/models/sta/observed_property.py

@@ -1,72 +1,37 @@
-…
-from …
-from pydantic import …
-from ..base import …
+import uuid
+from typing import Optional, ClassVar, TYPE_CHECKING
+from pydantic import Field
+from ..base import HydroServerBaseModel
 
 if TYPE_CHECKING:
     from hydroserverpy import HydroServer
     from hydroserverpy.api.models import Workspace
 
 
-class …
+class ObservedProperty(HydroServerBaseModel):
     name: str = Field(..., max_length=255)
     definition: str
     description: str
-    observed_property_type: str = Field(
-        …
-        serialization_alias="type",
-        validation_alias=AliasChoices("type", AliasPath("properties", "variableType")),
-    )
-    code: str = Field(
-        ...,
-        max_length=255,
-        validation_alias=AliasChoices("code", AliasPath("properties", "variableCode")),
-    )
+    observed_property_type: str = Field(..., max_length=255, alias="type")
+    code: str = Field(..., max_length=255)
+    workspace_id: Optional[uuid.UUID] = None
 
+    _editable_fields: ClassVar[set[str]] = {"name", "definition", "description", "observed_property_type", "code"}
 
-    def …
-    …
-        super().__init__(
-            _connection=_connection, _model_ref="observedproperties", _uid=_uid, **data
-        )
-
-        self._workspace_id = (
-            data.get("workspace_id")
-            or data.get("workspaceId")
-            or (
-                None
-                if data.get("properties", {}).get("workspace") is None
-                else data.get("properties", {}).get("workspace", {}).get("id")
-            )
-        )
-        self._workspace_id = (
-            str(self._workspace_id) if self._workspace_id is not None else None
-        )
+    def __init__(self, client: "HydroServer", **data):
+        super().__init__(client=client, service=client.observedproperties, **data)
 
         self._workspace = None
 
+    @classmethod
+    def get_route(cls):
+        return "observed-properties"
+
     @property
-    def workspace(self) -> "Workspace":
+    def workspace(self) -> Optional["Workspace"]:
         """The workspace this observed property belongs to."""
 
-        if self._workspace is None and self.…
-            self._workspace = self.…
+        if self._workspace is None and self.workspace_id:
+            self._workspace = self.client.workspaces.get(uid=self.workspace_id)
 
         return self._workspace
-
-    def refresh(self):
-        """Refresh this observed property from HydroServer."""
-
-        super()._refresh()
-        self._workspace = None
-
-    def save(self):
-        """Save changes to this observed property to HydroServer."""
-
-        super()._save()
-
-    def delete(self):
-        """Delete this observed property from HydroServer."""
-
-        super()._delete()