hydroserverpy-0.2.3-py3-none-any.whl → hydroserverpy-0.3.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of hydroserverpy might be problematic.

Files changed (61)
  1. hydroserverpy/__init__.py +6 -15
  2. hydroserverpy/core/endpoints/__init__.py +9 -0
  3. hydroserverpy/core/endpoints/base.py +133 -0
  4. hydroserverpy/core/endpoints/data_loaders.py +92 -0
  5. hydroserverpy/core/endpoints/data_sources.py +92 -0
  6. hydroserverpy/core/endpoints/datastreams.py +188 -0
  7. hydroserverpy/core/endpoints/observed_properties.py +93 -0
  8. hydroserverpy/core/endpoints/processing_levels.py +93 -0
  9. hydroserverpy/core/endpoints/result_qualifiers.py +93 -0
  10. hydroserverpy/core/endpoints/sensors.py +93 -0
  11. hydroserverpy/core/endpoints/things.py +240 -0
  12. hydroserverpy/core/endpoints/units.py +93 -0
  13. hydroserverpy/{components → core/schemas}/__init__.py +1 -2
  14. hydroserverpy/core/schemas/base.py +117 -0
  15. hydroserverpy/core/schemas/data_loaders.py +71 -0
  16. hydroserverpy/core/schemas/data_sources.py +206 -0
  17. hydroserverpy/core/schemas/datastreams.py +299 -0
  18. hydroserverpy/core/schemas/observed_properties.py +35 -0
  19. hydroserverpy/core/schemas/processing_levels.py +27 -0
  20. hydroserverpy/core/schemas/result_qualifiers.py +23 -0
  21. hydroserverpy/core/schemas/sensors.py +53 -0
  22. hydroserverpy/core/schemas/things.py +309 -0
  23. hydroserverpy/core/schemas/units.py +30 -0
  24. hydroserverpy/core/service.py +186 -0
  25. hydroserverpy/etl/__init__.py +0 -0
  26. hydroserverpy/{etl.py → etl/service.py} +32 -47
  27. hydroserverpy/quality/__init__.py +1 -0
  28. hydroserverpy/quality/service.py +391 -0
  29. {hydroserverpy-0.2.3.dist-info → hydroserverpy-0.3.0.dist-info}/METADATA +6 -3
  30. hydroserverpy-0.3.0.dist-info/RECORD +36 -0
  31. {hydroserverpy-0.2.3.dist-info → hydroserverpy-0.3.0.dist-info}/WHEEL +1 -1
  32. hydroserverpy/components/data_loaders.py +0 -67
  33. hydroserverpy/components/data_sources.py +0 -98
  34. hydroserverpy/components/datastreams.py +0 -47
  35. hydroserverpy/components/observed_properties.py +0 -48
  36. hydroserverpy/components/processing_levels.py +0 -48
  37. hydroserverpy/components/result_qualifiers.py +0 -48
  38. hydroserverpy/components/sensors.py +0 -48
  39. hydroserverpy/components/things.py +0 -48
  40. hydroserverpy/components/units.py +0 -48
  41. hydroserverpy/components/users.py +0 -28
  42. hydroserverpy/main.py +0 -62
  43. hydroserverpy/models.py +0 -218
  44. hydroserverpy/schemas/data_loaders.py +0 -27
  45. hydroserverpy/schemas/data_sources.py +0 -58
  46. hydroserverpy/schemas/datastreams.py +0 -56
  47. hydroserverpy/schemas/observed_properties.py +0 -33
  48. hydroserverpy/schemas/processing_levels.py +0 -33
  49. hydroserverpy/schemas/result_qualifiers.py +0 -32
  50. hydroserverpy/schemas/sensors.py +0 -39
  51. hydroserverpy/schemas/things.py +0 -108
  52. hydroserverpy/schemas/units.py +0 -32
  53. hydroserverpy/schemas/users.py +0 -28
  54. hydroserverpy/service.py +0 -170
  55. hydroserverpy/utils.py +0 -37
  56. hydroserverpy-0.2.3.dist-info/RECORD +0 -35
  57. /hydroserverpy/{schemas → core}/__init__.py +0 -0
  58. /hydroserverpy/{exceptions.py → etl/exceptions.py} +0 -0
  59. {hydroserverpy-0.2.3.dist-info → hydroserverpy-0.3.0.dist-info}/LICENSE +0 -0
  60. {hydroserverpy-0.2.3.dist-info → hydroserverpy-0.3.0.dist-info}/top_level.txt +0 -0
  61. {hydroserverpy-0.2.3.dist-info → hydroserverpy-0.3.0.dist-info}/zip-safe +0 -0
hydroserverpy/core/schemas/base.py
@@ -0,0 +1,117 @@
+from pydantic import BaseModel, PrivateAttr, AliasGenerator, AliasChoices, computed_field
+from pydantic.alias_generators import to_camel
+from uuid import UUID
+from typing import Optional
+
+
+base_alias_generator = AliasGenerator(
+    serialization_alias=lambda field_name: to_camel(field_name),
+    validation_alias=lambda field_name: AliasChoices(to_camel(field_name), field_name),
+)
+
+
+class HydroServerBaseModel(BaseModel):
+    """
+    A base model for HydroServer entities that provides common attributes and functionality for HydroServer data.
+
+    :ivar _uid: A private attribute for storing the unique identifier (UUID) of the model.
+    """
+
+    _uid: Optional[UUID] = PrivateAttr()
+
+    def __init__(self, _uid: Optional[UUID] = None, **data):
+        """
+        Initialize a HydroServerBaseModel instance.
+
+        :param _uid: The unique identifier for the model.
+        :type _uid: Optional[UUID]
+        :param data: Additional attributes for the model.
+        """
+
+        super().__init__(**data)
+        self._uid = _uid
+
+    @computed_field
+    @property
+    def uid(self) -> Optional[UUID]:
+        """
+        The unique identifier (UUID) of the model.
+
+        :return: The UUID of the model.
+        :rtype: Optional[UUID]
+        """
+
+        return self._uid
+
+    class Config:
+        alias_generator = base_alias_generator
+        validate_assignment = True
+
+
+class HydroServerCoreModel(HydroServerBaseModel):
+    """
+    A core model for HydroServer entities that includes methods for data manipulation and persistence.
+
+    :ivar _original_data: A private attribute storing the original data used to initialize the model.
+    """
+
+    _original_data: Optional[dict] = PrivateAttr()
+
+    def __init__(self, _endpoint, _uid: Optional[UUID] = None, **data):
+        """
+        Initialize a HydroServerCoreModel instance.
+
+        :param _endpoint: The endpoint associated with the model.
+        :param _uid: The unique identifier for the model.
+        :type _uid: Optional[UUID]
+        :param data: Additional attributes for the model.
+        """
+
+        super().__init__(_uid=_uid, **data)
+        self._endpoint = _endpoint
+        self._original_data = self.model_dump(by_alias=False)
+
+    @property
+    def _patch_data(self) -> dict:
+        """
+        Generate a dictionary of modified data that needs to be patched on the server.
+
+        :return: A dictionary of modified attributes.
+        :rtype: dict
+        """
+
+        return {
+            key: getattr(self, key) for key, value in self._original_data.items()
+            if hasattr(self, key) and getattr(self, key) != value
+        }
+
+    def refresh(self) -> None:
+        """
+        Refresh the model with the latest data from the server.
+        """
+
+        entity = self._endpoint.get(uid=self.uid).model_dump(by_alias=False, exclude={'uid'})
+        self._original_data = entity
+        self.__dict__.update(entity)
+
+    def save(self) -> None:
+        """
+        Save the current state of the model to the server by updating modified attributes.
+        """
+
+        if self._patch_data:
+            entity = self._endpoint.update(uid=self.uid, **self._patch_data)
+            self._original_data = entity.model_dump(by_alias=False, exclude={'uid'})
+            self.__dict__.update(self._original_data)
+
+    def delete(self) -> None:
+        """
+        Delete the model from the server.
+
+        :raises AttributeError: If the model's UID is not set.
+        """
+
+        if not self._uid:
+            raise AttributeError('This resource cannot be deleted: UID is not set.')
+        self._endpoint.delete(uid=self._uid)
+        self._uid = None
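This file is the heart of the new object model: on construction, HydroServerCoreModel snapshots its field values into _original_data, so _patch_data can later diff the current state against that snapshot and save() sends only the changed fields, while base_alias_generator maps snake_case fields to camelCase on the wire. A minimal sketch of that flow, assuming hydroserverpy 0.3.0 is installed; the Site model, its fields, and its values are hypothetical, not part of the package:

from typing import Optional
from uuid import uuid4

from hydroserverpy.core.schemas.base import HydroServerCoreModel


# Hypothetical subclass; _endpoint is left as None since no request is made here.
class Site(HydroServerCoreModel):
    site_name: str
    site_code: Optional[str] = None


site = Site(_endpoint=None, _uid=uuid4(), site_name='Logan River', site_code='LR01')

# base_alias_generator serializes fields to camelCase and accepts either form on input.
print(site.model_dump(by_alias=True))  # {'siteName': 'Logan River', 'siteCode': 'LR01', 'uid': UUID(...)}

site.site_name = 'Logan River at Mendon Road'
print(site._patch_data)  # {'site_name': 'Logan River at Mendon Road'} -- only the changed field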
hydroserverpy/core/schemas/data_loaders.py
@@ -0,0 +1,71 @@
+from pydantic import BaseModel, Field
+from typing import Optional, List, TYPE_CHECKING
+from uuid import UUID
+from hydroserverpy.core.schemas.base import HydroServerCoreModel
+
+if TYPE_CHECKING:
+    from hydroserverpy.core.schemas.data_sources import DataSource
+
+
+class DataLoaderFields(BaseModel):
+    name: str = Field(
+        ..., strip_whitespace=True, max_length=255,
+        description='The name of the data loader.'
+    )
+
+
+class DataLoader(HydroServerCoreModel, DataLoaderFields):
+    """
+    A model representing a DataLoader, extending the core functionality of HydroServerCoreModel with additional
+    properties and methods.
+
+    :ivar _data_sources: A private attribute to cache the list of data sources associated with the DataLoader.
+    """
+
+    def __init__(self, _endpoint, _uid: Optional[UUID] = None, **data):
+        """
+        Initialize a DataLoader instance.
+
+        :param _endpoint: The endpoint associated with the data loader.
+        :type _endpoint: str
+        :param _uid: The unique identifier for the data loader.
+        :type _uid: Optional[UUID]
+        :param data: Additional attributes for the data loader.
+        """
+
+        super().__init__(_endpoint=_endpoint, _uid=_uid, **data)
+        self._data_sources = None
+
+    @property
+    def data_sources(self) -> List['DataSource']:
+        """
+        The data sources associated with the data loader. If not already cached, fetch the data sources from the server.
+
+        :return: A list of data sources associated with the data loader.
+        :rtype: List[DataSource]
+        """
+
+        if self._data_sources is None:
+            self._data_sources = self._endpoint.list_data_sources(uid=self.uid)
+
+        return self._data_sources
+
+    def refresh(self) -> None:
+        """
+        Refresh the data loader with the latest data from the server and update cached data sources.
+        """
+
+        entity = self._endpoint.get(uid=self.uid).model_dump(exclude={'uid'})
+        self._original_data = entity
+        self.__dict__.update(entity)
+        if self._data_sources is not None:
+            self._data_sources = self._endpoint.list_data_sources(uid=self.uid)
+
+    def load_observations(self) -> None:
+        """
+        Load observations from local files or remote URLs into HydroServer using all data sources associated with
+        this data loader.
+        """
+
+        for data_source in self.data_sources:
+            data_source.load_observations()
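Taken together, a DataLoader is a named grouping of DataSource records whose load_observations() fans out to each linked source. A hypothetical usage sketch follows; the connection step is an assumption, since this diff shows the service object and its dataloaders endpoint collection but not the constructor's exact name or signature, and the host, credentials, and UUID are placeholders:

import hydroserverpy

# Assumed entry point; adjust to the package's actual service constructor.
service = hydroserverpy.HydroServer(
    host='https://hydroserver.example.com',
    username='user@example.com',
    password='********',
)

loader = service.dataloaders.get(uid='11111111-1111-1111-1111-111111111111')
print(loader.name)
print([source.name for source in loader.data_sources])  # fetched once, then cached
loader.load_observations()  # run the ETL for every linked data source
loader.refresh()            # re-pull the loader and any cached data sources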
hydroserverpy/core/schemas/data_sources.py
@@ -0,0 +1,206 @@
+import tempfile
+import io
+from pydantic import BaseModel, Field
+from typing import Optional, Literal, Union, List, TYPE_CHECKING
+from datetime import datetime
+from uuid import UUID
+from urllib.request import urlopen
+from hydroserverpy.core.schemas.base import HydroServerCoreModel
+from hydroserverpy.etl.service import HydroServerETL
+
+if TYPE_CHECKING:
+    from hydroserverpy.core.schemas.data_loaders import DataLoader
+    from hydroserverpy.core.schemas.datastreams import Datastream
+
+
+class DataSourceFields(BaseModel):
+    name: str = Field(
+        ..., strip_whitespace=True, max_length=255,
+        description='The name of the data source.'
+    )
+    path: Optional[str] = Field(
+        None, strip_whitespace=True, max_length=255,
+        description='The path to a local data source file.'
+    )
+    link: Optional[str] = Field(
+        None, strip_whitespace=True, max_length=255,
+        description='The link to a remote data source file.'
+    )
+    header_row: Optional[int] = Field(
+        None, gt=0, lt=9999,
+        description='The row number containing the column headers.'
+    )
+    data_start_row: Optional[int] = Field(
+        None, gt=0, lt=9999,
+        description='The row number where the data begins.'
+    )
+    delimiter: Optional[str] = Field(
+        ',', strip_whitespace=True, max_length=1,
+        description='The delimiter used by the data source file.'
+    )
+    quote_char: Optional[str] = Field(
+        '"', strip_whitespace=True, max_length=1,
+        description='The quote character used by the data source file.'
+    )
+    interval: Optional[int] = Field(
+        None, gt=0, lt=9999,
+        description='The time interval at which the data source should be loaded.'
+    )
+    interval_units: Optional[Literal['minutes', 'hours', 'days', 'weeks', 'months']] = Field(
+        None,
+        description='The time units of the loading interval.'
+    )
+    crontab: Optional[str] = Field(
+        None, strip_whitespace=True, max_length=255,
+        description='The crontab used to schedule when the data source should be loaded.'
+    )
+    start_time: Optional[datetime] = Field(
+        None,
+        description='When the data source should begin being loaded.'
+    )
+    end_time: Optional[datetime] = Field(
+        None,
+        description='When the data source should stop being loaded.'
+    )
+    paused: Optional[bool] = Field(
+        False,
+        description='Whether loading the data source should be paused or not.'
+    )
+    timestamp_column: Union[int, str] = Field(
+        ..., strip_whitespace=True, max_length=255,
+        description='The column of the data source file containing the timestamps.'
+    )
+    timestamp_format: Optional[str] = Field(
+        '%Y-%m-%dT%H:%M:%S%Z', strip_whitespace=True, max_length=255,
+        description='The format of the timestamps, using Python\'s datetime strftime codes.'
+    )
+    timestamp_offset: Optional[str] = Field(
+        '+0000', strip_whitespace=True, max_length=255,
+        description='An ISO 8601 time zone offset designator code to be applied to timestamps in the data source file.'
+    )
+    data_loader_id: UUID = Field(
+        ...,
+        description='The ID of the data loader responsible for loading this data source.'
+    )
+    data_source_thru: Optional[datetime] = Field(
+        None,
+        description='The timestamp through which the data source contains data.'
+    )
+    last_sync_successful: Optional[bool] = Field(
+        None,
+        description='Whether the last data loading attempt was successful or not.'
+    )
+    last_sync_message: Optional[str] = Field(
+        None, strip_whitespace=True,
+        description='A message generated by the data loader when it last attempted to load data from this data source.'
+    )
+    last_synced: Optional[datetime] = Field(
+        None,
+        description='The last time the data loader attempted to load data from this data source.'
+    )
+    next_sync: Optional[datetime] = Field(
+        None,
+        description='The next time the data loader will attempt to load data from this data source.'
+    )
+
+
+class DataSource(HydroServerCoreModel, DataSourceFields):
+    """
+    A model representing a data source, extending the core functionality of HydroServerCoreModel with additional
+    properties and methods.
+
+    :ivar _datastreams: A private attribute to cache the list of datastreams associated with the data source.
+    :ivar _data_loader: A private attribute to cache the data loader associated with the data source.
+    """
+
+    def __init__(self, _endpoint, _uid: Optional[UUID] = None, **data):
+        """
+        Initialize a DataSource instance.
+
+        :param _endpoint: The endpoint associated with the DataSource.
+        :param _uid: The unique identifier for the DataSource.
+        :type _uid: Optional[UUID]
+        :param data: Additional attributes for the DataSource.
+        """
+
+        super().__init__(_endpoint=_endpoint, _uid=_uid, **data)
+        self._datastreams = None
+        self._data_loader = None
+
+    @property
+    def datastreams(self) -> List['Datastream']:
+        """
+        Retrieve the datastreams associated with the DataSource. If not already cached, fetch the datastreams from the
+        server.
+
+        :return: A list of datastreams associated with the data source.
+        :rtype: List[Datastream]
+        """
+
+        if self._datastreams is None:
+            self._datastreams = self._endpoint.list_datastreams(uid=self.uid)
+
+        return self._datastreams
+
+    @property
+    def data_loader(self) -> 'DataLoader':
+        """
+        Retrieve the data loader associated with the data source. If not already cached, fetch the data loader from the
+        server.
+
+        :return: The data loader associated with the data source.
+        :rtype: DataLoader
+        """
+
+        if self._data_loader is None:
+            self._data_loader = self._endpoint._service.dataloaders.get(uid=self.data_loader_id)  # noqa
+
+        return self._data_loader
+
+    def refresh(self) -> None:
+        """
+        Refresh the data source with the latest data from the server and update cached datastreams and data loader if
+        they were previously loaded.
+        """
+
+        entity = self._endpoint.get(uid=self.uid).model_dump(exclude={'uid'})
+        self._original_data = entity
+        self.__dict__.update(entity)
+        if self._datastreams is not None:
+            self._datastreams = self._endpoint.list_datastreams(uid=self.uid)
+        if self._data_loader is not None:
+            self._data_loader = self._endpoint._service.dataloaders.get(uid=self.data_loader_id)  # noqa
+
+    def load_observations(self) -> None:
+        """
+        Load observations from a local file or a remote URL into HydroServer using this data source's configuration.
+        """
+
+        if self.path:
+            with open(self.path, 'rb') as f:
+                with io.TextIOWrapper(f, encoding='utf-8') as data_file:
+                    hs_etl = HydroServerETL(
+                        service=getattr(self._endpoint, '_service'),
+                        data_file=data_file,
+                        data_source=self,
+                    )
+                    hs_etl.run()
+        elif self.link:
+            with tempfile.NamedTemporaryFile(mode='w+b') as temp_file:
+                with urlopen(self.link) as response:
+                    chunk_size = 1024 * 1024 * 10  # Use a 10 MB chunk size.
+                    while True:
+                        chunk = response.read(chunk_size)
+                        if not chunk:
+                            break
+                        temp_file.write(chunk)
+                temp_file.seek(0)
+                with io.TextIOWrapper(temp_file, encoding='utf-8') as data_file:
+                    hs_etl = HydroServerETL(
+                        service=getattr(self._endpoint, '_service'),
+                        data_file=data_file,
+                        data_source=self,
+                    )
+                    hs_etl.run()
+        else:
+            return None
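For remote files, load_observations() deliberately avoids holding the whole response in memory: it spools the download to a temporary file in 10 MB chunks, rewinds, and hands the ETL service a text view of the file. A self-contained sketch of that spool-to-disk pattern, using only the standard library; the URL is a placeholder:

import io
import tempfile
from urllib.request import urlopen


def read_remote_text(url: str, chunk_size: int = 10 * 1024 * 1024) -> str:
    """Spool a remote file to disk in fixed-size chunks, then decode it as UTF-8."""
    with tempfile.NamedTemporaryFile(mode='w+b') as temp_file:
        with urlopen(url) as response:
            while True:
                chunk = response.read(chunk_size)
                if not chunk:
                    break
                temp_file.write(chunk)
        temp_file.seek(0)  # rewind so the text wrapper reads from the start
        with io.TextIOWrapper(temp_file, encoding='utf-8') as data_file:
            return data_file.read()


print(read_remote_text('https://example.com/observations.csv')[:200])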
hydroserverpy/core/schemas/datastreams.py
@@ -0,0 +1,299 @@
+from pydantic import BaseModel, Field
+from pandas import DataFrame
+from typing import Optional, Literal, TYPE_CHECKING
+from uuid import UUID
+from datetime import datetime
+from hydroserverpy.core.schemas.base import HydroServerCoreModel
+
+if TYPE_CHECKING:
+    from hydroserverpy.core.schemas.things import Thing
+    from hydroserverpy.core.schemas.data_sources import DataSource
+    from hydroserverpy.core.schemas.sensors import Sensor
+    from hydroserverpy.core.schemas.units import Unit
+    from hydroserverpy.core.schemas.processing_levels import ProcessingLevel
+    from hydroserverpy.core.schemas.observed_properties import ObservedProperty
+
+
+class DatastreamFields(BaseModel):
+    name: str = Field(
+        ..., strip_whitespace=True, max_length=255,
+        description='The name of the datastream.'
+    )
+    description: str = Field(
+        ..., strip_whitespace=True,
+        description='A description of the datastream.'
+    )
+    observation_type: str = Field(
+        ..., strip_whitespace=True, max_length=255,
+        description='The type of observation recorded in this datastream.'
+    )
+    sampled_medium: str = Field(
+        ..., strip_whitespace=True, max_length=255,
+        description='The physical medium in which the observations were sampled.'
+    )
+    no_data_value: float = Field(
+        ...,
+        description='A numerical value representing no data at a given timestamp.',
+    )
+    aggregation_statistic: str = Field(
+        ..., strip_whitespace=True, max_length=255,
+        description='The statistic calculated over the time aggregation interval of observations in this datastream.'
+    )
+    time_aggregation_interval: float = Field(
+        ...,
+        description='The time interval over which the aggregation statistic is applied to observations.',
+    )
+    status: Optional[str] = Field(
+        None, strip_whitespace=True, max_length=255,
+        description='The current status of this datastream.'
+    )
+    result_type: str = Field(
+        ..., strip_whitespace=True, max_length=255,
+        description='The type of result recorded in this datastream.'
+    )
+    value_count: Optional[int] = Field(
+        None, ge=0,
+        description='The total number of observations in this datastream.'
+    )
+    phenomenon_begin_time: Optional[datetime] = Field(
+        None,
+        description='The timestamp representing when the first phenomenon recorded in this datastream occurred.'
+    )
+    phenomenon_end_time: Optional[datetime] = Field(
+        None,
+        description='The timestamp representing when the last phenomenon recorded in this datastream occurred.'
+    )
+    result_begin_time: Optional[datetime] = Field(
+        None,
+        description='The timestamp representing when the first observation of this datastream was recorded.'
+    )
+    result_end_time: Optional[datetime] = Field(
+        None,
+        description='The timestamp representing when the last observation of this datastream was recorded.'
+    )
+    data_source_id: Optional[UUID] = Field(
+        None,
+        description='The data source for observations of this datastream.'
+    )
+    data_source_column: Optional[str] = Field(
+        None, strip_whitespace=True, max_length=255,
+        description='The name of the column containing this datastream\'s observations in the data source file.'
+    )
+    is_visible: bool = Field(
+        True,
+        description='Whether this datastream is publicly visible.'
+    )
+    is_data_visible: bool = Field(
+        True,
+        description='Whether the observations associated with this datastream are publicly visible.'
+    )
+    thing_id: UUID = Field(
+        ...,
+        description='The site/thing from which observations of this datastream were recorded.'
+    )
+    sensor_id: UUID = Field(
+        ...,
+        description='The sensor used to record observations of this datastream.'
+    )
+    observed_property_id: UUID = Field(
+        ...,
+        description='The physical property being observed for this datastream.'
+    )
+    processing_level_id: UUID = Field(
+        ...,
+        description='The processing level applied to this datastream.'
+    )
+    unit_id: UUID = Field(
+        ...,
+        description='The unit used to record observations for this datastream.'
+    )
+    time_aggregation_interval_units: Literal['seconds', 'minutes', 'hours', 'days'] = Field(
+        ...,
+        description='The time unit for this datastream\'s time aggregation interval.'
+    )
+    intended_time_spacing: Optional[float] = Field(
+        None,
+        description='The time interval at which observations should be made for this datastream.'
+    )
+    intended_time_spacing_units: Optional[Literal['seconds', 'minutes', 'hours', 'days']] = Field(
+        None,
+        description='The time unit for this datastream\'s intended time spacing interval.'
+    )
+
+
+class Datastream(HydroServerCoreModel, DatastreamFields):
+    """
+    A model representing a datastream, extending the core functionality of HydroServerCoreModel with additional
+    properties and methods.
+
+    :ivar _thing: A private attribute to cache the associated thing entity.
+    :ivar _data_source: A private attribute to cache the associated data source entity.
+    :ivar _observed_property: A private attribute to cache the associated observed property entity.
+    :ivar _processing_level: A private attribute to cache the associated processing level entity.
+    :ivar _unit: A private attribute to cache the associated unit entity.
+    :ivar _sensor: A private attribute to cache the associated sensor entity.
+    """
+
+    def __init__(self, _endpoint, _uid: Optional[UUID] = None, **data):
+        """
+        Initialize a Datastream instance.
+
+        :param _endpoint: The endpoint associated with the Datastream.
+        :param _uid: The unique identifier for the Datastream.
+        :type _uid: Optional[UUID]
+        :param data: Additional attributes for the Datastream.
+        """
+
+        super().__init__(_endpoint=_endpoint, _uid=_uid, **data)
+        self._thing = None
+        self._data_source = None
+        self._observed_property = None
+        self._processing_level = None
+        self._unit = None
+        self._sensor = None
+
+    @property
+    def thing(self) -> 'Thing':
+        """
+        The thing entity associated with the datastream. If not already cached, fetch it from the server.
+
+        :return: The thing entity associated with the datastream.
+        :rtype: Thing
+        """
+
+        if self._thing is None:
+            self._thing = self._endpoint._service.things.get(uid=self.thing_id)  # noqa
+
+        return self._thing
+
+    @property
+    def data_source(self) -> Optional['DataSource']:
+        """
+        The data source entity associated with the datastream. If not already cached, fetch it from the server.
+
+        :return: The data source entity associated with the datastream, if one is set.
+        :rtype: Optional[DataSource]
+        """
+
+        if self._data_source is None and self.data_source_id is not None:
+            self._data_source = self._endpoint._service.datasources.get(uid=self.data_source_id)  # noqa
+
+        return self._data_source
+
+    @property
+    def observed_property(self) -> 'ObservedProperty':
+        """
+        Retrieve the observed property entity associated with the datastream. If not already cached, fetch it from the
+        server.
+
+        :return: The observed property entity associated with the datastream.
+        :rtype: ObservedProperty
+        """
+
+        if self._observed_property is None:
+            self._observed_property = self._endpoint._service.observedproperties.get(uid=self.observed_property_id)  # noqa
+
+        return self._observed_property
+
+    @property
+    def processing_level(self) -> 'ProcessingLevel':
+        """
+        Retrieve the processing level entity associated with the datastream. If not already cached, fetch it from the
+        server.
+
+        :return: The processing level entity associated with the datastream.
+        :rtype: ProcessingLevel
+        """
+
+        if self._processing_level is None:
+            self._processing_level = self._endpoint._service.processinglevels.get(uid=self.processing_level_id)  # noqa
+
+        return self._processing_level
+
+    @property
+    def unit(self) -> 'Unit':
+        """
+        Retrieve the unit entity associated with the datastream. If not already cached, fetch it from the server.
+
+        :return: The unit entity associated with the datastream.
+        :rtype: Unit
+        """
+
+        if self._unit is None:
+            self._unit = self._endpoint._service.units.get(uid=self.unit_id)  # noqa
+
+        return self._unit
+
+    @property
+    def sensor(self) -> 'Sensor':
+        """
+        Retrieve the sensor entity associated with the datastream. If not already cached, fetch it from the server.
+
+        :return: The sensor entity associated with the datastream.
+        :rtype: Sensor
+        """
+
+        if self._sensor is None:
+            self._sensor = self._endpoint._service.sensors.get(uid=self.sensor_id)  # noqa
+
+        return self._sensor
+
+    def refresh(self) -> None:
+        """
+        Refresh the datastream with the latest data from the server and update cached entities if they were previously
+        loaded.
+        """
+
+        entity = self._endpoint.get(uid=self.uid).model_dump(exclude={'uid'})
+        self._original_data = entity
+        self.__dict__.update(entity)
+        if self._thing is not None:
+            self._thing = self._endpoint._service.things.get(uid=self.thing_id)  # noqa
+        if self._data_source is not None:
+            self._data_source = self._endpoint._service.datasources.get(uid=self.data_source_id)  # noqa
+        if self._observed_property is not None:
+            self._observed_property = self._endpoint._service.observedproperties.get(uid=self.observed_property_id)  # noqa
+        if self._processing_level is not None:
+            self._processing_level = self._endpoint._service.processinglevels.get(uid=self.processing_level_id)  # noqa
+        if self._unit is not None:
+            self._unit = self._endpoint._service.units.get(uid=self.unit_id)  # noqa
+        if self._sensor is not None:
+            self._sensor = self._endpoint._service.sensors.get(uid=self.sensor_id)  # noqa
+
+    def get_observations(
+        self,
+        start_time: Optional[datetime] = None,
+        end_time: Optional[datetime] = None,
+        page: int = 1,
+        page_size: int = 100000,
+        include_quality: bool = False,
+        fetch_all: bool = False
+    ) -> DataFrame:
+        """
+        Retrieve the observations for this datastream.
+
+        :return: A DataFrame containing the observations associated with the datastream.
+        :rtype: DataFrame
+        """
+
+        return self._endpoint.get_observations(
+            uid=self.uid, start_time=start_time, end_time=end_time, page=page, page_size=page_size,
+            include_quality=include_quality, fetch_all=fetch_all
+        )
+
+    def load_observations(
+        self,
+        observations: DataFrame,
+    ) -> None:
+        """
+        Load a DataFrame of observations to the datastream.
+
+        :param observations: A pandas DataFrame containing the observations to be uploaded.
+        :type observations: DataFrame
+        :return: None
+        """
+
+        return self._endpoint.load_observations(
+            uid=self.uid,
+            observations=observations,
+        )
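The Datastream model ties the rest of the schema together: each related entity (thing, sensor, unit, and so on) is lazy-loaded through the service on first access, and observations move in and out as pandas DataFrames. A hypothetical round-trip, continuing the assumed service handle from the DataLoader sketch above; the attribute names on related entities and the column layout expected by load_observations are not shown in this diff, so both are illustrative only:

import pandas as pd

datastream = service.datastreams.get(uid='22222222-2222-2222-2222-222222222222')
print(datastream.observed_property.name)  # related entities fetched lazily, then cached
print(datastream.unit.name)

df = datastream.get_observations(fetch_all=True)            # all pages in one DataFrame
page = datastream.get_observations(page=1, page_size=1000)  # or one page at a time

new_obs = pd.DataFrame({  # illustrative columns only
    'timestamp': pd.to_datetime(['2024-01-01T00:00:00Z', '2024-01-01T00:15:00Z']),
    'value': [1.20, 1.35],
})
datastream.load_observations(observations=new_obs)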