hydroserverpy 0.3.0-py3-none-any.whl → 0.4.0-py3-none-any.whl

This diff compares the contents of package versions publicly released to a supported registry. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.

Potentially problematic release: this version of hydroserverpy might be problematic.
Files changed (49)
  1. hydroserverpy/__init__.py +1 -1
  2. hydroserverpy/core/endpoints/base.py +44 -31
  3. hydroserverpy/core/endpoints/data_loaders.py +6 -5
  4. hydroserverpy/core/endpoints/data_sources.py +6 -5
  5. hydroserverpy/core/endpoints/datastreams.py +89 -52
  6. hydroserverpy/core/endpoints/observed_properties.py +36 -18
  7. hydroserverpy/core/endpoints/processing_levels.py +36 -18
  8. hydroserverpy/core/endpoints/result_qualifiers.py +37 -19
  9. hydroserverpy/core/endpoints/sensors.py +37 -19
  10. hydroserverpy/core/endpoints/things.py +58 -37
  11. hydroserverpy/core/endpoints/units.py +37 -19
  12. hydroserverpy/core/schemas/base.py +13 -6
  13. hydroserverpy/core/schemas/data_loaders.py +6 -4
  14. hydroserverpy/core/schemas/data_sources.py +73 -56
  15. hydroserverpy/core/schemas/datastreams.py +101 -70
  16. hydroserverpy/core/schemas/observed_properties.py +18 -10
  17. hydroserverpy/core/schemas/processing_levels.py +10 -6
  18. hydroserverpy/core/schemas/result_qualifiers.py +7 -4
  19. hydroserverpy/core/schemas/sensors.py +33 -18
  20. hydroserverpy/core/schemas/things.py +97 -60
  21. hydroserverpy/core/schemas/units.py +7 -8
  22. hydroserverpy/core/service.py +31 -17
  23. hydroserverpy/etl/__init__.py +21 -0
  24. hydroserverpy/etl/extractors/__init__.py +0 -0
  25. hydroserverpy/etl/extractors/base.py +13 -0
  26. hydroserverpy/etl/extractors/ftp_extractor.py +50 -0
  27. hydroserverpy/etl/extractors/http_extractor.py +84 -0
  28. hydroserverpy/etl/extractors/local_file_extractor.py +25 -0
  29. hydroserverpy/etl/hydroserver_etl.py +40 -0
  30. hydroserverpy/etl/loaders/__init__.py +0 -0
  31. hydroserverpy/etl/loaders/base.py +13 -0
  32. hydroserverpy/etl/loaders/hydroserver_loader.py +68 -0
  33. hydroserverpy/etl/transformers/__init__.py +0 -0
  34. hydroserverpy/etl/transformers/base.py +52 -0
  35. hydroserverpy/etl/transformers/csv_transformer.py +88 -0
  36. hydroserverpy/etl/transformers/json_transformer.py +62 -0
  37. hydroserverpy/etl/types.py +7 -0
  38. hydroserverpy/etl_csv/__init__.py +0 -0
  39. hydroserverpy/{etl/service.py → etl_csv/hydroserver_etl_csv.py} +92 -54
  40. hydroserverpy/quality/service.py +84 -70
  41. hydroserverpy-0.4.0.dist-info/METADATA +18 -0
  42. hydroserverpy-0.4.0.dist-info/RECORD +51 -0
  43. {hydroserverpy-0.3.0.dist-info → hydroserverpy-0.4.0.dist-info}/WHEEL +1 -1
  44. hydroserverpy-0.3.0.dist-info/METADATA +0 -18
  45. hydroserverpy-0.3.0.dist-info/RECORD +0 -36
  46. /hydroserverpy/{etl → etl_csv}/exceptions.py +0 -0
  47. {hydroserverpy-0.3.0.dist-info → hydroserverpy-0.4.0.dist-info}/LICENSE +0 -0
  48. {hydroserverpy-0.3.0.dist-info → hydroserverpy-0.4.0.dist-info}/top_level.txt +0 -0
  49. {hydroserverpy-0.3.0.dist-info → hydroserverpy-0.4.0.dist-info}/zip-safe +0 -0
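
The most significant structural change is the new hydroserverpy.etl package (items 23-37), which breaks the old single-purpose ETL service into pluggable extract/transform/load stages: FTP, HTTP, and local-file extractors, CSV and JSON transformers, a HydroServer loader, and an orchestrator in hydroserver_etl.py, each stage with its own base.py. The previous CSV-only workflow survives under hydroserverpy.etl_csv (items 38, 39, and 46). The class names below are hypothetical; this is a minimal, self-contained sketch of that three-stage pattern implied by the file layout, not the package's actual API:

from abc import ABC, abstractmethod
from typing import Any


class Extractor(ABC):
    """Fetches raw data from a source (local file, HTTP endpoint, FTP server, ...)."""

    @abstractmethod
    def extract(self) -> Any: ...


class Transformer(ABC):
    """Parses a raw payload (e.g. CSV or JSON) into observation records."""

    @abstractmethod
    def transform(self, raw: Any) -> list[dict]: ...


class Loader(ABC):
    """Writes transformed observations to a destination such as a HydroServer instance."""

    @abstractmethod
    def load(self, observations: list[dict]) -> None: ...


class ETLPipeline:
    """Runs one extract -> transform -> load pass, as an orchestrator module might."""

    def __init__(self, extractor: Extractor, transformer: Transformer, loader: Loader):
        self.extractor = extractor
        self.transformer = transformer
        self.loader = loader

    def run(self) -> None:
        raw = self.extractor.extract()
        self.loader.load(self.transformer.transform(raw))

Splitting the stages this way lets a single loader be reused across FTP, HTTP, and local-file sources, which is consistent with the one-base-class-per-stage layout in the file list above.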
hydroserverpy/core/schemas/data_sources.py

@@ -6,7 +6,7 @@ from datetime import datetime
 from uuid import UUID
 from urllib.request import urlopen
 from hydroserverpy.core.schemas.base import HydroServerCoreModel
-from hydroserverpy.etl.service import HydroServerETL
+from hydroserverpy.etl_csv.hydroserver_etl_csv import HydroServerETLCSV
 
 if TYPE_CHECKING:
     from hydroserverpy.core.schemas.data_loaders import DataLoader
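
The only functional change in this hunk is the renamed CSV ETL entry point; the rest of this file's changes below are quote-style and line-wrapping updates consistent with a black reformat. Downstream code that imported the old class directly would need an equivalent update; a sketch based solely on the paths shown in this diff:

# 0.3.0 import path (removed):
# from hydroserverpy.etl.service import HydroServerETL

# 0.4.0 import path (added):
from hydroserverpy.etl_csv.hydroserver_etl_csv import HydroServerETLCSV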
@@ -15,92 +15,105 @@ if TYPE_CHECKING:
 
 class DataSourceFields(BaseModel):
     name: str = Field(
-        ..., strip_whitespace=True, max_length=255,
-        description='The name of the data source.'
+        ...,
+        strip_whitespace=True,
+        max_length=255,
+        description="The name of the data source.",
     )
     path: Optional[str] = Field(
-        None, strip_whitespace=True, max_length=255,
-        description='The path to a local data source file.'
+        None,
+        strip_whitespace=True,
+        max_length=255,
+        description="The path to a local data source file.",
     )
     link: Optional[str] = Field(
-        None, strip_whitespace=True, max_length=255,
-        description='The link to a remote data source file.'
+        None,
+        strip_whitespace=True,
+        max_length=255,
+        description="The link to a remote data source file.",
     )
     header_row: Optional[int] = Field(
-        None, gt=0, lt=9999,
-        description='The row number where the data begins.'
+        None, gt=0, lt=9999, description="The row number where the data begins."
     )
     data_start_row: Optional[int] = Field(
-        None, gt=0, lt=9999,
-        description='The row number where the data begins.'
+        None, gt=0, lt=9999, description="The row number where the data begins."
     )
     delimiter: Optional[str] = Field(
-        ',', strip_whitespace=True, max_length=1,
-        description='The delimiter used by the data source file.'
+        ",",
+        strip_whitespace=True,
+        max_length=1,
+        description="The delimiter used by the data source file.",
     )
     quote_char: Optional[str] = Field(
-        '"', strip_whitespace=True, max_length=1,
-        description='The quote delimiter character used by the data source file.'
+        '"',
+        strip_whitespace=True,
+        max_length=1,
+        description="The quote delimiter character used by the data source file.",
     )
     interval: Optional[int] = Field(
-        None, gt=0, lt=9999,
-        description='The time interval at which the data source should be loaded.'
-    )
-    interval_units: Optional[Literal['minutes', 'hours', 'days', 'weeks', 'months']] = Field(
         None,
-        description='The interval units used by the data source file.'
+        gt=0,
+        lt=9999,
+        description="The time interval at which the data source should be loaded.",
+    )
+    interval_units: Optional[Literal["minutes", "hours", "days", "weeks", "months"]] = (
+        Field(None, description="The interval units used by the data source file.")
     )
     crontab: Optional[str] = Field(
-        None, strip_whitespace=True, max_length=255,
-        description='The crontab used to schedule when the data source should be loaded.'
+        None,
+        strip_whitespace=True,
+        max_length=255,
+        description="The crontab used to schedule when the data source should be loaded.",
     )
     start_time: Optional[datetime] = Field(
-        None,
-        description='When the data source should begin being loaded.'
+        None, description="When the data source should begin being loaded."
     )
     end_time: Optional[datetime] = Field(
-        None,
-        description='When the data source should stop being loaded.'
+        None, description="When the data source should stop being loaded."
     )
     paused: Optional[bool] = Field(
-        False,
-        description='Whether loading the data source should be paused or not.'
+        False, description="Whether loading the data source should be paused or not."
     )
     timestamp_column: Union[int, str] = Field(
-        ..., strip_whitespace=True, max_length=255,
-        description='The column of the data source file containing the timestamps.'
+        ...,
+        strip_whitespace=True,
+        max_length=255,
+        description="The column of the data source file containing the timestamps.",
     )
     timestamp_format: Optional[str] = Field(
-        '%Y-%m-%dT%H:%M:%S%Z', strip_whitespace=True, max_length=255,
-        description='The format of the timestamps, using Python\'s datetime strftime codes.'
+        "%Y-%m-%dT%H:%M:%S%Z",
+        strip_whitespace=True,
+        max_length=255,
+        description="The format of the timestamps, using Python's datetime strftime codes.",
     )
     timestamp_offset: Optional[str] = Field(
-        '+0000', strip_whitespace=True, max_length=255,
-        description='An ISO 8601 time zone offset designator code to be applied to timestamps in the data source file.'
+        "+0000",
+        strip_whitespace=True,
+        max_length=255,
+        description="An ISO 8601 time zone offset designator code to be applied to timestamps in the data source file.",
     )
     data_loader_id: UUID = Field(
         ...,
-        description='The ID of the data loader responsible for loading this data source.'
+        description="The ID of the data loader responsible for loading this data source.",
     )
     data_source_thru: Optional[datetime] = Field(
-        None,
-        description='The timestamp through which the data source contains data.'
+        None, description="The timestamp through which the data source contains data."
     )
     last_sync_successful: Optional[bool] = Field(
-        None,
-        description='Whether the last data loading attempt was successful of not.'
+        None, description="Whether the last data loading attempt was successful of not."
     )
     last_sync_message: Optional[str] = Field(
-        None, strip_whitespace=True,
-        description='A message generated by the data loader it attempted to load data from this data source.'
+        None,
+        strip_whitespace=True,
+        description="A message generated by the data loader it attempted to load data from this data source.",
     )
     last_synced: Optional[datetime] = Field(
         None,
-        description='The last time the data loader attempted to load data from this data source.'
+        description="The last time the data loader attempted to load data from this data source.",
     )
     next_sync: Optional[datetime] = Field(
         None,
-        description="The next time the data loader will attempt to load data from this data source."
+        description="The next time the data loader will attempt to load data from this data source.",
     )
 
 
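Two defaults above are worth reading together: timestamp_format uses Python strftime codes, and timestamp_offset is an ISO 8601 offset applied to the parsed timestamps. A standalone illustration of how such a pair can be interpreted (my reading of the field descriptions, not code from this package):

from datetime import datetime, timedelta, timezone

# Default format "%Y-%m-%dT%H:%M:%S%Z"; %Z matches a zone name such as "UTC".
parsed = datetime.strptime("2024-06-01T12:30:00UTC", "%Y-%m-%dT%H:%M:%S%Z")

# Default offset "+0000": a sign, two hour digits, two minute digits.
offset = "+0000"
sign = 1 if offset.startswith("+") else -1
delta = timedelta(hours=int(offset[1:3]), minutes=int(offset[3:5]))

aware = parsed.replace(tzinfo=timezone(sign * delta))
print(aware.isoformat())  # 2024-06-01T12:30:00+00:00
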
@@ -128,7 +141,7 @@ class DataSource(HydroServerCoreModel, DataSourceFields):
         self._data_loader = None
 
     @property
-    def datastreams(self) -> List['Datastream']:
+    def datastreams(self) -> List["Datastream"]:
         """
         Retrieve the datastreams associated with the DataSource. If not already cached, fetch the datastreams from the
         server.
@@ -143,7 +156,7 @@ class DataSource(HydroServerCoreModel, DataSourceFields):
         return self._datastreams
 
     @property
-    def data_loader(self) -> 'DataLoader':
+    def data_loader(self) -> "DataLoader":
         """
         Retrieve the data loader associated with the data source. If not already cached, fetch the data loader from the
         server.
@@ -153,7 +166,9 @@ class DataSource(HydroServerCoreModel, DataSourceFields):
         """
 
         if self._data_loader is None:
-            self._data_loader = self._endpoint._service.dataloaders.get(uid=self.data_loader_id)  # noqa
+            self._data_loader = self._endpoint._service.dataloaders.get(
+                uid=self.data_loader_id
+            )  # noqa
 
         return self._data_loader
 
@@ -163,13 +178,15 @@ class DataSource(HydroServerCoreModel, DataSourceFields):
         they were previously loaded.
         """
 
-        entity = self._endpoint.get(uid=self.uid).model_dump(exclude=['uid'])
+        entity = self._endpoint.get(uid=self.uid).model_dump(exclude=["uid"])
         self._original_data = entity
         self.__dict__.update(entity)
         if self._datastreams is not None:
             self._datastreams = self._endpoint.list_datastreams(uid=self.uid)
         if self._data_loader is not None:
-            self._data_loader = self._endpoint._service.dataloaders.get(uid=self.data_loader_id)  # noqa
+            self._data_loader = self._endpoint._service.dataloaders.get(
+                uid=self.data_loader_id
+            )  # noqa
 
     def load_observations(self) -> None:
         """
@@ -177,16 +194,16 @@ class DataSource(HydroServerCoreModel, DataSourceFields):
         """
 
         if self.path:
-            with open(self.path, 'rb') as f:
-                with io.TextIOWrapper(f, encoding='utf-8') as data_file:
-                    hs_etl = HydroServerETL(
-                        service=getattr(self._endpoint, '_service'),
+            with open(self.path, "rb") as f:
+                with io.TextIOWrapper(f, encoding="utf-8") as data_file:
+                    hs_etl = HydroServerETLCSV(
+                        service=getattr(self._endpoint, "_service"),
                         data_file=data_file,
                         data_source=self,
                     )
                     hs_etl.run()
         elif self.link:
-            with tempfile.NamedTemporaryFile(mode='w+b') as temp_file:
+            with tempfile.NamedTemporaryFile(mode="w+b") as temp_file:
                 with urlopen(self.link) as response:
                     chunk_size = 1024 * 1024 * 10  # Use a 10mb chunk size.
                     while True:
@@ -195,9 +212,9 @@ class DataSource(HydroServerCoreModel, DataSourceFields):
                             break
                         temp_file.write(chunk)
                 temp_file.seek(0)
-                with io.TextIOWrapper(temp_file, encoding='utf-8') as data_file:
-                    hs_etl = HydroServerETL(
-                        service=getattr(self._endpoint, '_service'),
+                with io.TextIOWrapper(temp_file, encoding="utf-8") as data_file:
+                    hs_etl = HydroServerETLCSV(
+                        service=getattr(self._endpoint, "_service"),
                         data_file=data_file,
                         data_source=self,
                     )
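
Net effect of the hunks above: DataSource.load_observations() still reads from either path or link in one call, now delegating to HydroServerETLCSV instead of HydroServerETL. Hypothetical usage, where service stands in for an already-authenticated client object not shown in this diff:

# "service" and the uid are placeholders for illustration only.
data_source = service.datasources.get(uid="<data-source-uid>")
data_source.load_observations()  # opens self.path, or streams self.link in 10 MB chunks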

hydroserverpy/core/schemas/datastreams.py

@@ -16,108 +16,118 @@ if TYPE_CHECKING:
 
 class DatastreamFields(BaseModel):
     name: str = Field(
-        ..., strip_whitespace=True, max_length=255,
-        description='The name of the datastream.'
+        ...,
+        strip_whitespace=True,
+        max_length=255,
+        description="The name of the datastream.",
     )
     description: str = Field(
-        ..., strip_whitespace=True,
-        description='A description of the datastream.'
+        ..., strip_whitespace=True, description="A description of the datastream."
     )
     observation_type: str = Field(
-        ..., strip_whitespace=True, max_length=255,
-        description='The type of observation recorded in this datastream'
+        ...,
+        strip_whitespace=True,
+        max_length=255,
+        description="The type of observation recorded in this datastream",
     )
     sampled_medium: str = Field(
-        ..., strip_whitespace=True, max_length=255,
-        description='The physical medium in which the observations were sampled.'
+        ...,
+        strip_whitespace=True,
+        max_length=255,
+        description="The physical medium in which the observations were sampled.",
     )
     no_data_value: float = Field(
         ...,
-        description='A numerical value representing no data at a given timestamp.',
+        description="A numerical value representing no data at a given timestamp.",
     )
     aggregation_statistic: str = Field(
-        ..., strip_whitespace=True, max_length=255,
-        description='The statistic calculated over the time aggregation interval of observations in this datastream.'
+        ...,
+        strip_whitespace=True,
+        max_length=255,
+        description="The statistic calculated over the time aggregation interval of observations in this datastream.",
     )
     time_aggregation_interval: float = Field(
         ...,
-        description='The time interval over which the aggregation statistic is applied to observations.',
+        description="The time interval over which the aggregation statistic is applied to observations.",
     )
     status: Optional[str] = Field(
-        None, strip_whitespace=True, max_length=255,
-        description='The current status of this datastream.'
+        None,
+        strip_whitespace=True,
+        max_length=255,
+        description="The current status of this datastream.",
     )
     result_type: str = Field(
-        ..., strip_whitespace=True, max_length=255,
-        description='The type of result recorded in this datastream.'
+        ...,
+        strip_whitespace=True,
+        max_length=255,
+        description="The type of result recorded in this datastream.",
     )
     value_count: Optional[int] = Field(
-        None, ge=0,
-        description='The total number of observations in this datastream.'
+        None, ge=0, description="The total number of observations in this datastream."
     )
     phenomenon_begin_time: Optional[datetime] = Field(
         None,
-        description='The timestamp representing when the first phenomenon recorded in this datastream occurred.'
+        description="The timestamp representing when the first phenomenon recorded in this datastream occurred.",
     )
     phenomenon_end_time: Optional[datetime] = Field(
         None,
-        description='The timestamp representing when the last phenomenon recorded in this datastream occurred.'
+        description="The timestamp representing when the last phenomenon recorded in this datastream occurred.",
     )
     result_begin_time: Optional[datetime] = Field(
         None,
-        description='The timestamp representing when the first observation of this datastream was recorded.'
+        description="The timestamp representing when the first observation of this datastream was recorded.",
     )
     result_end_time: Optional[datetime] = Field(
         None,
-        description='The timestamp representing when the last observation of this datastream was recorded.'
+        description="The timestamp representing when the last observation of this datastream was recorded.",
     )
     data_source_id: Optional[UUID] = Field(
-        None,
-        description='The data source for observations of this datastream.'
+        None, description="The data source for observations of this datastream."
     )
     data_source_column: Optional[str] = Field(
-        None, strip_whitespace=True, max_length=255,
-        description='The name of the column containing this datastream\'s observations in the data source file.'
+        None,
+        strip_whitespace=True,
+        max_length=255,
+        description="The name of the column containing this datastream's observations in the data source file.",
     )
     is_visible: bool = Field(
-        True,
-        description='Whether this datastream is publicly visible.'
+        True, description="Whether this datastream is publicly visible."
     )
     is_data_visible: bool = Field(
         True,
-        description='Whether this observations associated with this datastream are publicly visible.'
+        description="Whether this observations associated with this datastream are publicly visible.",
     )
     thing_id: UUID = Field(
         ...,
-        description='The site/thing from which observations of this datastream were recorded.'
+        description="The site/thing from which observations of this datastream were recorded.",
     )
     sensor_id: UUID = Field(
-        ...,
-        description='The sensor used to record observations of this datastream.'
+        ..., description="The sensor used to record observations of this datastream."
    )
     observed_property_id: UUID = Field(
-        ...,
-        description='The physical property being observed for this datastream.'
+        ..., description="The physical property being observed for this datastream."
     )
     processing_level_id: UUID = Field(
-        ...,
-        description='The processing level applied to this datastream.'
+        ..., description="The processing level applied to this datastream."
     )
     unit_id: UUID = Field(
-        ...,
-        description='The unit used to record observations for this datastream.'
+        ..., description="The unit used to record observations for this datastream."
     )
-    time_aggregation_interval_units: Literal['seconds', 'minutes', 'hours', 'days'] = Field(
-        ...,
-        description='The time unit for this datastream\'s time aggregation interval'
+    time_aggregation_interval_units: Literal["seconds", "minutes", "hours", "days"] = (
+        Field(
+            ...,
+            description="The time unit for this datastream's time aggregation interval",
+        )
     )
     intended_time_spacing: Optional[float] = Field(
         None,
-        description='The time interval at which observations should be made for this datastream.'
+        description="The time interval at which observations should be made for this datastream.",
     )
-    intended_time_spacing_units: Optional[Literal['seconds', 'minutes', 'hours', 'days']] = Field(
+    intended_time_spacing_units: Optional[
+        Literal["seconds", "minutes", "hours", "days"]
+    ] = Field(
         None,
-        description='The time unit for this datastream\'s intended time spacing interval'
+        description="The time unit for this datastream's intended time spacing interval",
     )
 
 
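The rewrapping of the Literal unit fields above is purely cosmetic; the allowed values are unchanged. A self-contained sketch of how such a Literal constraint behaves under pydantic (an illustrative model, not the package's own):

from typing import Literal
from pydantic import BaseModel, ValidationError


class AggregationWindow(BaseModel):
    # Mirrors the unit constraint on DatastreamFields above.
    time_aggregation_interval: float
    time_aggregation_interval_units: Literal["seconds", "minutes", "hours", "days"]


AggregationWindow(time_aggregation_interval=15, time_aggregation_interval_units="minutes")  # valid

try:
    AggregationWindow(time_aggregation_interval=15, time_aggregation_interval_units="fortnights")
except ValidationError as err:
    print(err.errors()[0]["loc"])  # ('time_aggregation_interval_units',)
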
@@ -153,7 +163,7 @@ class Datastream(HydroServerCoreModel, DatastreamFields):
         self._sensor = None
 
     @property
-    def thing(self) -> 'Thing':
+    def thing(self) -> "Thing":
         """
         The thing entity associated with the datastream. If not already cached, fetch it from the server.
 
@@ -167,7 +177,7 @@ class Datastream(HydroServerCoreModel, DatastreamFields):
         return self._thing
 
     @property
-    def data_source(self) -> 'DataSource':
+    def data_source(self) -> "DataSource":
         """
         The data source entity associated with the datastream. If not already cached, fetch it from the server.
 
@@ -176,12 +186,14 @@ class Datastream(HydroServerCoreModel, DatastreamFields):
         """
 
         if self._data_source is None:
-            self._data_source = self._endpoint._service.datasources.get(uid=self.data_source_id)  # noqa
+            self._data_source = self._endpoint._service.datasources.get(
+                uid=self.data_source_id
+            )  # noqa
 
         return self._data_source
 
     @property
-    def observed_property(self) -> 'ObservedProperty':
+    def observed_property(self) -> "ObservedProperty":
         """
         Retrieve the observed property entity associated with the datastream. If not already cached, fetch it from the
         server.
@@ -191,12 +203,14 @@ class Datastream(HydroServerCoreModel, DatastreamFields):
         """
 
         if self._observed_property is None:
-            self._observed_property = self._endpoint._service.observedproperties.get(uid=self.observed_property_id)  # noqa
+            self._observed_property = self._endpoint._service.observedproperties.get(
+                uid=self.observed_property_id
+            )  # noqa
 
         return self._observed_property
 
     @property
-    def processing_level(self) -> 'ProcessingLevel':
+    def processing_level(self) -> "ProcessingLevel":
         """
         Retrieve the processing level entity associated with the datastream. If not already cached, fetch it from the
         server.
@@ -206,12 +220,14 @@ class Datastream(HydroServerCoreModel, DatastreamFields):
         """
 
         if self._processing_level is None:
-            self._processing_level = self._endpoint._service.processinglevels.get(uid=self.processing_level_id)  # noqa
+            self._processing_level = self._endpoint._service.processinglevels.get(
+                uid=self.processing_level_id
+            )  # noqa
 
         return self._processing_level
 
     @property
-    def unit(self) -> 'Unit':
+    def unit(self) -> "Unit":
         """
         Retrieve the unit entity associated with the datastream. If not already cached, fetch it from the server.
 
@@ -225,7 +241,7 @@ class Datastream(HydroServerCoreModel, DatastreamFields):
         return self._unit
 
     @property
-    def sensor(self) -> 'Sensor':
+    def sensor(self) -> "Sensor":
         """
         Retrieve the sensor entity associated with the datastream. If not already cached, fetch it from the server.
 
@@ -234,7 +250,9 @@ class Datastream(HydroServerCoreModel, DatastreamFields):
         """
 
         if self._sensor is None:
-            self._sensor = self._endpoint._service.sensors.get(uid=self.sensor_id)  # noqa
+            self._sensor = self._endpoint._service.sensors.get(
+                uid=self.sensor_id
+            )  # noqa
 
         return self._sensor
 
@@ -244,30 +262,38 @@ class Datastream(HydroServerCoreModel, DatastreamFields):
         loaded.
         """
 
-        entity = self._endpoint.get(uid=self.uid).model_dump(exclude=['uid'])
+        entity = self._endpoint.get(uid=self.uid).model_dump(exclude=["uid"])
         self._original_data = entity
         self.__dict__.update(entity)
         if self._thing is not None:
             self._thing = self._endpoint._service.things.get(uid=self.thing_id)  # noqa
         if self._data_source is not None:
-            self._data_source = self._endpoint._service.datasources.get(uid=self.data_source_id)  # noqa
+            self._data_source = self._endpoint._service.datasources.get(
+                uid=self.data_source_id
+            )  # noqa
         if self._observed_property is not None:
-            self._observed_property = self._endpoint._service.observedproperties.get(uid=self.observed_property_id)  # noqa
+            self._observed_property = self._endpoint._service.observedproperties.get(
+                uid=self.observed_property_id
+            )  # noqa
         if self._processing_level is not None:
-            self._processing_level = self._endpoint._service.processinglevels.get(uid=self.processing_level_id)  # noqa
+            self._processing_level = self._endpoint._service.processinglevels.get(
+                uid=self.processing_level_id
+            )  # noqa
         if self._unit is not None:
             self._unit = self._endpoint._service.units.get(uid=self.unit_id)  # noqa
         if self._sensor is not None:
-            self._sensor = self._endpoint._service.sensors.get(uid=self.sensor_id)  # noqa
+            self._sensor = self._endpoint._service.sensors.get(
+                uid=self.sensor_id
+            )  # noqa
 
     def get_observations(
-            self,
-            start_time: datetime = None,
-            end_time: datetime = None,
-            page: int = 1,
-            page_size: int = 100000,
-            include_quality: bool = False,
-            fetch_all: bool = False
+        self,
+        start_time: datetime = None,
+        end_time: datetime = None,
+        page: int = 1,
+        page_size: int = 100000,
+        include_quality: bool = False,
+        fetch_all: bool = False,
     ) -> DataFrame:
         """
         Retrieve the observations for this datastream.
@@ -277,13 +303,18 @@ class Datastream(HydroServerCoreModel, DatastreamFields):
         """
 
         return self._endpoint.get_observations(
-            uid=self.uid, start_time=start_time, end_time=end_time, page=page, page_size=page_size,
-            include_quality=include_quality, fetch_all=fetch_all
+            uid=self.uid,
+            start_time=start_time,
+            end_time=end_time,
+            page=page,
+            page_size=page_size,
+            include_quality=include_quality,
+            fetch_all=fetch_all,
         )
 
     def load_observations(
-            self,
-            observations: DataFrame,
+        self,
+        observations: DataFrame,
     ) -> None:
         """
         Load a DataFrame of observations to the datastream.
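
get_observations keeps the same parameters and defaults through the reformat, so paging behavior is unchanged: one page of up to 100,000 rows unless fetch_all pages through everything. A hypothetical call using only the parameters visible in the signature above (datastream stands in for a previously fetched object):

from datetime import datetime

df = datastream.get_observations(
    start_time=datetime(2024, 1, 1),
    end_time=datetime(2024, 2, 1),
    include_quality=True,  # include quality annotations with the observations
    fetch_all=True,        # keep requesting pages until all rows are returned
)
print(df.head())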

hydroserverpy/core/schemas/observed_properties.py

@@ -5,24 +5,32 @@ from hydroserverpy.core.schemas.base import HydroServerCoreModel
 
 class ObservedPropertyFields(BaseModel):
     name: str = Field(
-        ..., strip_whitespace=True, max_length=255,
-        description='The name of the observed property.'
+        ...,
+        strip_whitespace=True,
+        max_length=255,
+        description="The name of the observed property.",
     )
     definition: str = Field(
-        ..., strip_whitespace=True,
-        description='The definition of the observed property.'
+        ...,
+        strip_whitespace=True,
+        description="The definition of the observed property.",
     )
     description: Optional[str] = Field(
-        None, strip_whitespace=True,
-        description='A description of the observed property.'
+        None,
+        strip_whitespace=True,
+        description="A description of the observed property.",
     )
     type: Optional[str] = Field(
-        None, strip_whitespace=True, max_length=255,
-        description='The type of the observed property.'
+        None,
+        strip_whitespace=True,
+        max_length=255,
+        description="The type of the observed property.",
     )
     code: Optional[str] = Field(
-        None, strip_whitespace=True, max_length=255,
-        description='A code representing the observed property.'
+        None,
+        strip_whitespace=True,
+        max_length=255,
+        description="A code representing the observed property.",
     )
 
 

hydroserverpy/core/schemas/processing_levels.py

@@ -5,16 +5,20 @@ from hydroserverpy.core.schemas.base import HydroServerCoreModel
 
 class ProcessingLevelFields(BaseModel):
     code: str = Field(
-        ..., strip_whitespace=True, max_length=255,
-        description='A code representing the processing level.'
+        ...,
+        strip_whitespace=True,
+        max_length=255,
+        description="A code representing the processing level.",
     )
     definition: Optional[str] = Field(
-        None, strip_whitespace=True,
-        description='The definition of the processing level.'
+        None,
+        strip_whitespace=True,
+        description="The definition of the processing level.",
     )
     explanation: Optional[str] = Field(
-        None, strip_whitespace=True,
-        description='The explanation of the processing level.'
+        None,
+        strip_whitespace=True,
+        description="The explanation of the processing level.",
     )
 
 

hydroserverpy/core/schemas/result_qualifiers.py

@@ -5,12 +5,15 @@ from hydroserverpy.core.schemas.base import HydroServerCoreModel
 
 class ResultQualifierFields(BaseModel):
     code: str = Field(
-        ..., strip_whitespace=True, max_length=255,
-        description='A code representing the result qualifier.'
+        ...,
+        strip_whitespace=True,
+        max_length=255,
+        description="A code representing the result qualifier.",
     )
     description: Optional[str] = Field(
-        None, strip_whitespace=True,
-        description='A description of the result qualifier.'
+        None,
+        strip_whitespace=True,
+        description="A description of the result qualifier.",
     )
 
 