cwms-python 1.0.1__tar.gz → 1.0.7__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {cwms_python-1.0.1 → cwms_python-1.0.7}/PKG-INFO +19 -1
- {cwms_python-1.0.1 → cwms_python-1.0.7}/README.md +18 -0
- {cwms_python-1.0.1 → cwms_python-1.0.7}/cwms/__init__.py +1 -0
- {cwms_python-1.0.1 → cwms_python-1.0.7}/cwms/api.py +93 -28
- {cwms_python-1.0.1 → cwms_python-1.0.7}/cwms/catalog/blobs.py +25 -8
- {cwms_python-1.0.1 → cwms_python-1.0.7}/cwms/catalog/catalog.py +6 -2
- {cwms_python-1.0.1 → cwms_python-1.0.7}/cwms/catalog/clobs.py +44 -23
- {cwms_python-1.0.1 → cwms_python-1.0.7}/cwms/outlets/outlets.py +1 -1
- {cwms_python-1.0.1 → cwms_python-1.0.7}/cwms/timeseries/timeseries.py +75 -5
- cwms_python-1.0.7/cwms/users/users.py +203 -0
- {cwms_python-1.0.1 → cwms_python-1.0.7}/cwms/utils/checks.py +12 -0
- {cwms_python-1.0.1 → cwms_python-1.0.7}/pyproject.toml +1 -2
- {cwms_python-1.0.1 → cwms_python-1.0.7}/LICENSE +0 -0
- {cwms_python-1.0.1 → cwms_python-1.0.7}/cwms/cwms_types.py +0 -0
- {cwms_python-1.0.1 → cwms_python-1.0.7}/cwms/forecast/forecast_instance.py +0 -0
- {cwms_python-1.0.1 → cwms_python-1.0.7}/cwms/forecast/forecast_spec.py +0 -0
- {cwms_python-1.0.1 → cwms_python-1.0.7}/cwms/levels/location_levels.py +0 -0
- {cwms_python-1.0.1 → cwms_python-1.0.7}/cwms/levels/specified_levels.py +0 -0
- {cwms_python-1.0.1 → cwms_python-1.0.7}/cwms/locations/gate_changes.py +0 -0
- {cwms_python-1.0.1 → cwms_python-1.0.7}/cwms/locations/location_groups.py +0 -0
- {cwms_python-1.0.1 → cwms_python-1.0.7}/cwms/locations/physical_locations.py +0 -0
- {cwms_python-1.0.1 → cwms_python-1.0.7}/cwms/measurements/measurements.py +0 -0
- {cwms_python-1.0.1 → cwms_python-1.0.7}/cwms/outlets/virtual_outlets.py +0 -0
- {cwms_python-1.0.1 → cwms_python-1.0.7}/cwms/projects/project_lock_rights.py +0 -0
- {cwms_python-1.0.1 → cwms_python-1.0.7}/cwms/projects/project_locks.py +0 -0
- {cwms_python-1.0.1 → cwms_python-1.0.7}/cwms/projects/projects.py +0 -0
- {cwms_python-1.0.1 → cwms_python-1.0.7}/cwms/projects/water_supply/accounting.py +0 -0
- {cwms_python-1.0.1 → cwms_python-1.0.7}/cwms/ratings/ratings.py +0 -0
- {cwms_python-1.0.1 → cwms_python-1.0.7}/cwms/ratings/ratings_spec.py +0 -0
- {cwms_python-1.0.1 → cwms_python-1.0.7}/cwms/ratings/ratings_template.py +0 -0
- {cwms_python-1.0.1 → cwms_python-1.0.7}/cwms/standard_text/standard_text.py +0 -0
- {cwms_python-1.0.1 → cwms_python-1.0.7}/cwms/timeseries/timeseries_bin.py +0 -0
- {cwms_python-1.0.1 → cwms_python-1.0.7}/cwms/timeseries/timeseries_group.py +0 -0
- {cwms_python-1.0.1 → cwms_python-1.0.7}/cwms/timeseries/timeseries_identifier.py +0 -0
- {cwms_python-1.0.1 → cwms_python-1.0.7}/cwms/timeseries/timeseries_profile.py +0 -0
- {cwms_python-1.0.1 → cwms_python-1.0.7}/cwms/timeseries/timeseries_profile_instance.py +0 -0
- {cwms_python-1.0.1 → cwms_python-1.0.7}/cwms/timeseries/timeseries_profile_parser.py +0 -0
- {cwms_python-1.0.1 → cwms_python-1.0.7}/cwms/timeseries/timeseries_txt.py +0 -0
- {cwms_python-1.0.1 → cwms_python-1.0.7}/cwms/turbines/turbines.py +0 -0
- {cwms_python-1.0.1 → cwms_python-1.0.7}/cwms/utils/__init__.py +0 -0
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: cwms-python
|
|
3
|
-
Version: 1.0.
|
|
3
|
+
Version: 1.0.7
|
|
4
4
|
Summary: Corps water management systems (CWMS) REST API for Data Retrieval of USACE water data
|
|
5
5
|
License: LICENSE
|
|
6
6
|
License-File: LICENSE
|
|
@@ -44,6 +44,24 @@ Then import the package:
|
|
|
44
44
|
import cwms
|
|
45
45
|
```
|
|
46
46
|
|
|
47
|
+
### Authentication
|
|
48
|
+
|
|
49
|
+
`cwms.init_session()` supports both CDA API keys and Keycloak access tokens.
|
|
50
|
+
Use `api_key=` for the headless CDA API key flow, or `token=` for an OIDC access
|
|
51
|
+
token such as one saved by [`cwms-cli login`]().
|
|
52
|
+
|
|
53
|
+
```python
|
|
54
|
+
import cwms
|
|
55
|
+
|
|
56
|
+
cwms.init_session(
|
|
57
|
+
api_root="https://cwms-data.usace.army.mil/cwms-data/",
|
|
58
|
+
token="ACCESS_TOKEN",
|
|
59
|
+
)
|
|
60
|
+
```
|
|
61
|
+
|
|
62
|
+
If both `token` and `api_key` are provided, `cwms-python` will use the token
|
|
63
|
+
and log a warning.
|
|
64
|
+
|
|
47
65
|
## Getting Started
|
|
48
66
|
|
|
49
67
|
```python
|
|
@@ -20,6 +20,24 @@ Then import the package:
|
|
|
20
20
|
import cwms
|
|
21
21
|
```
|
|
22
22
|
|
|
23
|
+
### Authentication
|
|
24
|
+
|
|
25
|
+
`cwms.init_session()` supports both CDA API keys and Keycloak access tokens.
|
|
26
|
+
Use `api_key=` for the headless CDA API key flow, or `token=` for an OIDC access
|
|
27
|
+
token such as one saved by [`cwms-cli login`]().
|
|
28
|
+
|
|
29
|
+
```python
|
|
30
|
+
import cwms
|
|
31
|
+
|
|
32
|
+
cwms.init_session(
|
|
33
|
+
api_root="https://cwms-data.usace.army.mil/cwms-data/",
|
|
34
|
+
token="ACCESS_TOKEN",
|
|
35
|
+
)
|
|
36
|
+
```
|
|
37
|
+
|
|
38
|
+
If both `token` and `api_key` are provided, `cwms-python` will use the token
|
|
39
|
+
and log a warning.
|
|
40
|
+
|
|
23
41
|
## Getting Started
|
|
24
42
|
|
|
25
43
|
```python
|
|
@@ -31,6 +31,7 @@ from cwms.timeseries.timeseries_profile_instance import *
|
|
|
31
31
|
from cwms.timeseries.timeseries_profile_parser import *
|
|
32
32
|
from cwms.timeseries.timeseries_txt import *
|
|
33
33
|
from cwms.turbines.turbines import *
|
|
34
|
+
from cwms.users.users import *
|
|
34
35
|
|
|
35
36
|
try:
|
|
36
37
|
__version__ = version("cwms-python")
|
|
@@ -5,9 +5,9 @@ functions should be used internally to interact with the API. The user should no
|
|
|
5
5
|
interact with these directly.
|
|
6
6
|
|
|
7
7
|
The `init_session()` function can be used to specify an alternative root URL, and to
|
|
8
|
-
provide an authentication key (if required). If `init_session()` is not
|
|
9
|
-
default root URL (see `API_ROOT` below) will be used, and no authentication
|
|
10
|
-
included when making API calls.
|
|
8
|
+
provide an authentication key or bearer token (if required). If `init_session()` is not
|
|
9
|
+
called, the default root URL (see `API_ROOT` below) will be used, and no authentication
|
|
10
|
+
headers will be included when making API calls.
|
|
11
11
|
|
|
12
12
|
Example: Initializing a session
|
|
13
13
|
|
|
@@ -17,6 +17,9 @@ Example: Initializing a session
|
|
|
17
17
|
# Specify an alternate URL and an auth key
|
|
18
18
|
init_session(api_root="https://example.com/cwms-data", api_key="API_KEY")
|
|
19
19
|
|
|
20
|
+
# Specify an alternate URL and an OIDC bearer token
|
|
21
|
+
init_session(api_root="https://example.com/cwms-data", token="ACCESS_TOKEN")
|
|
22
|
+
|
|
20
23
|
Functions which make API calls that _may_ return a JSON response will return a `dict`
|
|
21
24
|
containing the deserialized data. If the API response does not include data, an empty
|
|
22
25
|
`dict` will be returned.
|
|
@@ -34,6 +37,7 @@ from json import JSONDecodeError
|
|
|
34
37
|
from typing import Any, Optional, cast
|
|
35
38
|
|
|
36
39
|
from requests import Response, adapters
|
|
40
|
+
from requests.exceptions import RetryError as RequestsRetryError
|
|
37
41
|
from requests_toolbelt import sessions # type: ignore
|
|
38
42
|
from requests_toolbelt.sessions import BaseUrlSession # type: ignore
|
|
39
43
|
from urllib3.util.retry import Retry
|
|
@@ -52,12 +56,12 @@ retry_strategy = Retry(
|
|
|
52
56
|
status_forcelist=[
|
|
53
57
|
403,
|
|
54
58
|
429,
|
|
55
|
-
500,
|
|
56
59
|
502,
|
|
57
60
|
503,
|
|
58
61
|
504,
|
|
59
62
|
], # Example: also retry on these HTTP status codes
|
|
60
63
|
allowed_methods=["GET", "PUT", "POST", "PATCH", "DELETE"], # Methods to retry
|
|
64
|
+
raise_on_status=False,
|
|
61
65
|
)
|
|
62
66
|
SESSION = sessions.BaseUrlSession(base_url=API_ROOT)
|
|
63
67
|
adapter = adapters.HTTPAdapter(
|
|
@@ -78,10 +82,14 @@ class ApiError(Exception):
|
|
|
78
82
|
a concise, single-line error message with an optional hint.
|
|
79
83
|
"""
|
|
80
84
|
|
|
81
|
-
def __init__(self, response: Response):
|
|
85
|
+
def __init__(self, response: Response, message: Optional[str] = None):
|
|
82
86
|
self.response = response
|
|
87
|
+
self.message = message
|
|
83
88
|
|
|
84
89
|
def __str__(self) -> str:
|
|
90
|
+
if self.message:
|
|
91
|
+
return self.message
|
|
92
|
+
|
|
85
93
|
# Include the request URL in the error message.
|
|
86
94
|
message = f"CWMS API Error ({self.response.url})"
|
|
87
95
|
|
|
@@ -125,21 +133,54 @@ class ApiError(Exception):
|
|
|
125
133
|
return ""
|
|
126
134
|
|
|
127
135
|
|
|
136
|
+
class NotFoundError(ApiError):
|
|
137
|
+
"""Raised when a requested CDA resource does not exist."""
|
|
138
|
+
|
|
139
|
+
|
|
140
|
+
class PermissionError(ApiError):
|
|
141
|
+
"""Raised when the CDA request is not authorized for the current caller."""
|
|
142
|
+
|
|
143
|
+
|
|
144
|
+
def _unwrap_retry_error(error: RequestsRetryError) -> Exception:
|
|
145
|
+
"""Return the original retry cause when requests wraps it in RetryError."""
|
|
146
|
+
|
|
147
|
+
current: Exception = error
|
|
148
|
+
cause = error.__cause__
|
|
149
|
+
while isinstance(cause, Exception):
|
|
150
|
+
current = cause
|
|
151
|
+
cause = cause.__cause__
|
|
152
|
+
|
|
153
|
+
if current is error and error.args:
|
|
154
|
+
first_arg = error.args[0]
|
|
155
|
+
if isinstance(first_arg, Exception):
|
|
156
|
+
current = first_arg
|
|
157
|
+
reason = getattr(current, "reason", None)
|
|
158
|
+
while isinstance(reason, Exception):
|
|
159
|
+
current = reason
|
|
160
|
+
reason = getattr(current, "reason", None)
|
|
161
|
+
|
|
162
|
+
return current
|
|
163
|
+
|
|
164
|
+
|
|
128
165
|
def init_session(
|
|
129
166
|
*,
|
|
130
167
|
api_root: Optional[str] = None,
|
|
131
168
|
api_key: Optional[str] = None,
|
|
169
|
+
token: Optional[str] = None,
|
|
132
170
|
pool_connections: int = 100,
|
|
133
171
|
) -> BaseUrlSession:
|
|
134
|
-
"""Specify a root URL and authentication
|
|
172
|
+
"""Specify a root URL and authentication credentials for the CWMS Data API.
|
|
135
173
|
|
|
136
174
|
This function can be used to change the root URL used when interacting with the CDA.
|
|
137
|
-
All API calls made after this function is called will use the specified URL. If
|
|
138
|
-
authentication
|
|
175
|
+
All API calls made after this function is called will use the specified URL. If
|
|
176
|
+
authentication credentials are given they will be included in all future request
|
|
177
|
+
headers.
|
|
139
178
|
|
|
140
179
|
Keyword Args:
|
|
141
180
|
api_root (optional): The root URL for the CWMS Data API.
|
|
142
181
|
api_key (optional): An authentication key.
|
|
182
|
+
token (optional): A Keycloak access token. If both token and api_key are
|
|
183
|
+
provided, token is used.
|
|
143
184
|
|
|
144
185
|
Returns:
|
|
145
186
|
Returns the updated session object.
|
|
@@ -157,10 +198,18 @@ def init_session(
|
|
|
157
198
|
max_retries=retry_strategy,
|
|
158
199
|
)
|
|
159
200
|
SESSION.mount("https://", adapter)
|
|
160
|
-
if
|
|
201
|
+
if token:
|
|
202
|
+
if api_key:
|
|
203
|
+
logging.warning(
|
|
204
|
+
"Both token and api_key were provided to init_session(); using token for Authorization."
|
|
205
|
+
)
|
|
206
|
+
# Ensure we don't provide the bearer text twice
|
|
207
|
+
if token.lower().startswith("bearer "):
|
|
208
|
+
token = token[7:]
|
|
209
|
+
SESSION.headers.update({"Authorization": "Bearer " + token})
|
|
210
|
+
elif api_key:
|
|
161
211
|
if api_key.startswith("apikey "):
|
|
162
212
|
api_key = api_key.replace("apikey ", "")
|
|
163
|
-
logging.debug(f"Setting authorization key: api_key={api_key}")
|
|
164
213
|
SESSION.headers.update({"Authorization": "apikey " + api_key})
|
|
165
214
|
|
|
166
215
|
return SESSION
|
|
@@ -281,11 +330,14 @@ def get(
|
|
|
281
330
|
"""
|
|
282
331
|
|
|
283
332
|
headers = {"Accept": api_version_text(api_version)}
|
|
284
|
-
|
|
285
|
-
|
|
286
|
-
|
|
287
|
-
|
|
288
|
-
|
|
333
|
+
try:
|
|
334
|
+
with SESSION.get(endpoint, params=params, headers=headers) as response:
|
|
335
|
+
if not response.ok:
|
|
336
|
+
logging.error(f"CDA Error: response={response}")
|
|
337
|
+
raise ApiError(response)
|
|
338
|
+
return _process_response(response)
|
|
339
|
+
except RequestsRetryError as error:
|
|
340
|
+
raise _unwrap_retry_error(error) from None
|
|
289
341
|
|
|
290
342
|
|
|
291
343
|
def get_with_paging(
|
|
@@ -340,11 +392,16 @@ def _post_function(
|
|
|
340
392
|
headers = {"accept": "*/*", "Content-Type": api_version_text(api_version)}
|
|
341
393
|
if isinstance(data, dict) or isinstance(data, list):
|
|
342
394
|
data = json.dumps(data)
|
|
343
|
-
|
|
344
|
-
|
|
345
|
-
|
|
346
|
-
|
|
347
|
-
|
|
395
|
+
try:
|
|
396
|
+
with SESSION.post(
|
|
397
|
+
endpoint, params=params, headers=headers, data=data
|
|
398
|
+
) as response:
|
|
399
|
+
if not response.ok:
|
|
400
|
+
logging.error(f"CDA Error: response={response}")
|
|
401
|
+
raise ApiError(response)
|
|
402
|
+
return response
|
|
403
|
+
except RequestsRetryError as error:
|
|
404
|
+
raise _unwrap_retry_error(error) from None
|
|
348
405
|
|
|
349
406
|
|
|
350
407
|
def post(
|
|
@@ -434,10 +491,15 @@ def patch(
|
|
|
434
491
|
|
|
435
492
|
if data and isinstance(data, dict) or isinstance(data, list):
|
|
436
493
|
data = json.dumps(data)
|
|
437
|
-
|
|
438
|
-
|
|
439
|
-
|
|
440
|
-
|
|
494
|
+
try:
|
|
495
|
+
with SESSION.patch(
|
|
496
|
+
endpoint, params=params, headers=headers, data=data
|
|
497
|
+
) as response:
|
|
498
|
+
if not response.ok:
|
|
499
|
+
logging.error(f"CDA Error: response={response}")
|
|
500
|
+
raise ApiError(response)
|
|
501
|
+
except RequestsRetryError as error:
|
|
502
|
+
raise _unwrap_retry_error(error) from None
|
|
441
503
|
|
|
442
504
|
|
|
443
505
|
def delete(
|
|
@@ -461,7 +523,10 @@ def delete(
|
|
|
461
523
|
"""
|
|
462
524
|
|
|
463
525
|
headers = {"Accept": api_version_text(api_version)}
|
|
464
|
-
|
|
465
|
-
|
|
466
|
-
|
|
467
|
-
|
|
526
|
+
try:
|
|
527
|
+
with SESSION.delete(endpoint, params=params, headers=headers) as response:
|
|
528
|
+
if not response.ok:
|
|
529
|
+
logging.error(f"CDA Error: response={response}")
|
|
530
|
+
raise ApiError(response)
|
|
531
|
+
except RequestsRetryError as error:
|
|
532
|
+
raise _unwrap_retry_error(error) from None
|
|
@@ -1,9 +1,9 @@
|
|
|
1
1
|
import base64
|
|
2
|
-
from typing import Optional
|
|
2
|
+
from typing import Any, Optional
|
|
3
3
|
|
|
4
4
|
import cwms.api as api
|
|
5
5
|
from cwms.cwms_types import JSON, Data
|
|
6
|
-
from cwms.utils.checks import is_base64
|
|
6
|
+
from cwms.utils.checks import has_invalid_chars, is_base64
|
|
7
7
|
|
|
8
8
|
STORE_DICT = """data = {
|
|
9
9
|
"office-id": "SWT",
|
|
@@ -14,6 +14,8 @@ STORE_DICT = """data = {
|
|
|
14
14
|
}
|
|
15
15
|
"""
|
|
16
16
|
|
|
17
|
+
IGNORED_ID = "ignored"
|
|
18
|
+
|
|
17
19
|
|
|
18
20
|
def get_blob(blob_id: str, office_id: str) -> str:
|
|
19
21
|
"""Get a single BLOB (Binary Large Object).
|
|
@@ -29,8 +31,13 @@ def get_blob(blob_id: str, office_id: str) -> str:
|
|
|
29
31
|
str: the value returned based on the content-type it was stored with as a string
|
|
30
32
|
"""
|
|
31
33
|
|
|
32
|
-
|
|
33
|
-
|
|
34
|
+
params: dict[str, Any] = {}
|
|
35
|
+
if has_invalid_chars(blob_id):
|
|
36
|
+
endpoint = f"blobs/{IGNORED_ID}"
|
|
37
|
+
params["blob-id"] = blob_id
|
|
38
|
+
else:
|
|
39
|
+
endpoint = f"blobs/{blob_id}"
|
|
40
|
+
params["office"] = office_id
|
|
34
41
|
response = api.get(endpoint, params, api_version=1)
|
|
35
42
|
return str(response)
|
|
36
43
|
|
|
@@ -107,8 +114,13 @@ def delete_blob(blob_id: str, office_id: str) -> None:
|
|
|
107
114
|
None
|
|
108
115
|
"""
|
|
109
116
|
|
|
110
|
-
|
|
111
|
-
|
|
117
|
+
params: dict[str, Any] = {}
|
|
118
|
+
if has_invalid_chars(blob_id):
|
|
119
|
+
endpoint = f"blobs/{IGNORED_ID}"
|
|
120
|
+
params["blob-id"] = blob_id
|
|
121
|
+
else:
|
|
122
|
+
endpoint = f"blobs/{blob_id}"
|
|
123
|
+
params["office"] = office_id
|
|
112
124
|
return api.delete(endpoint, params, api_version=1)
|
|
113
125
|
|
|
114
126
|
|
|
@@ -143,6 +155,11 @@ def update_blob(data: JSON, fail_if_not_exists: Optional[bool] = True) -> None:
|
|
|
143
155
|
|
|
144
156
|
blob_id = data.get("id", "").upper()
|
|
145
157
|
|
|
146
|
-
|
|
147
|
-
|
|
158
|
+
params: dict[str, Any] = {}
|
|
159
|
+
if has_invalid_chars(blob_id):
|
|
160
|
+
endpoint = f"blobs/{IGNORED_ID}"
|
|
161
|
+
params["blob-id"] = blob_id
|
|
162
|
+
else:
|
|
163
|
+
endpoint = f"blobs/{blob_id}"
|
|
164
|
+
params["fail-if-not-exists"] = fail_if_not_exists
|
|
148
165
|
return api.patch(endpoint, data, params, api_version=1)
|
|
@@ -67,7 +67,9 @@ def get_locations_catalog(
|
|
|
67
67
|
"location-kind-like": location_kind_like,
|
|
68
68
|
}
|
|
69
69
|
|
|
70
|
-
response = api.
|
|
70
|
+
response = api.get_with_paging(
|
|
71
|
+
endpoint=endpoint, selector="entries", params=params, api_version=2
|
|
72
|
+
)
|
|
71
73
|
return Data(response, selector="entries")
|
|
72
74
|
|
|
73
75
|
|
|
@@ -131,7 +133,9 @@ def get_timeseries_catalog(
|
|
|
131
133
|
"include-extents": include_extents,
|
|
132
134
|
}
|
|
133
135
|
|
|
134
|
-
response = api.
|
|
136
|
+
response = api.get_with_paging(
|
|
137
|
+
endpoint=endpoint, selector="entries", params=params, api_version=2
|
|
138
|
+
)
|
|
135
139
|
return Data(response, selector="entries")
|
|
136
140
|
|
|
137
141
|
|
|
@@ -1,10 +1,21 @@
|
|
|
1
|
-
from typing import Optional
|
|
1
|
+
from typing import Any, Optional
|
|
2
2
|
|
|
3
3
|
import cwms.api as api
|
|
4
4
|
from cwms.cwms_types import JSON, Data
|
|
5
|
+
from cwms.utils.checks import has_invalid_chars
|
|
5
6
|
|
|
7
|
+
STORE_DICT = """data = {
|
|
8
|
+
"office-id": "SWT",
|
|
9
|
+
"id": "CLOB_ID",
|
|
10
|
+
"description": "Your description here",
|
|
11
|
+
"value": "STRING of content"
|
|
12
|
+
}
|
|
13
|
+
"""
|
|
6
14
|
|
|
7
|
-
|
|
15
|
+
IGNORED_ID = "ignored"
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
def get_clob(clob_id: str, office_id: str) -> Data:
|
|
8
19
|
"""Get a single clob.
|
|
9
20
|
|
|
10
21
|
Parameters
|
|
@@ -13,16 +24,6 @@ def get_clob(clob_id: str, office_id: str, clob_id_query: Optional[str] = None)
|
|
|
13
24
|
Specifies the id of the clob
|
|
14
25
|
office_id: string
|
|
15
26
|
Specifies the office of the clob.
|
|
16
|
-
clob_id_query: string
|
|
17
|
-
If this query parameter is provided the id path parameter is ignored and the
|
|
18
|
-
value of the query parameter is used. Note: this query parameter is necessary
|
|
19
|
-
for id's that contain '/' or other special characters. Because of abuse even
|
|
20
|
-
properly escaped '/' in url paths are blocked. When using this query parameter
|
|
21
|
-
a valid path parameter must still be provided for the request to be properly
|
|
22
|
-
routed. If your clob id contains '/' you can't specify the clob-id query
|
|
23
|
-
parameter and also specify the id path parameter because firewall and/or server
|
|
24
|
-
rules will deny the request even though you are specifying this override. "ignored"
|
|
25
|
-
is suggested.
|
|
26
27
|
|
|
27
28
|
|
|
28
29
|
Returns
|
|
@@ -30,11 +31,13 @@ def get_clob(clob_id: str, office_id: str, clob_id_query: Optional[str] = None)
|
|
|
30
31
|
cwms data type. data.json will return the JSON output and data.df will return a dataframe
|
|
31
32
|
"""
|
|
32
33
|
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
"
|
|
36
|
-
"clob-id
|
|
37
|
-
|
|
34
|
+
params: dict[str, Any] = {}
|
|
35
|
+
if has_invalid_chars(clob_id):
|
|
36
|
+
endpoint = f"clobs/{IGNORED_ID}"
|
|
37
|
+
params["clob-id"] = clob_id
|
|
38
|
+
else:
|
|
39
|
+
endpoint = f"clobs/{clob_id}"
|
|
40
|
+
params["office"] = office_id
|
|
38
41
|
response = api.get(endpoint, params)
|
|
39
42
|
return Data(response)
|
|
40
43
|
|
|
@@ -90,13 +93,20 @@ def delete_clob(clob_id: str, office_id: str) -> None:
|
|
|
90
93
|
None
|
|
91
94
|
"""
|
|
92
95
|
|
|
93
|
-
|
|
94
|
-
|
|
96
|
+
params: dict[str, Any] = {}
|
|
97
|
+
if has_invalid_chars(clob_id):
|
|
98
|
+
endpoint = f"clobs/{IGNORED_ID}"
|
|
99
|
+
params["clob-id"] = clob_id
|
|
100
|
+
else:
|
|
101
|
+
endpoint = f"clobs/{clob_id}"
|
|
102
|
+
params["office"] = office_id
|
|
95
103
|
|
|
96
104
|
return api.delete(endpoint, params=params, api_version=1)
|
|
97
105
|
|
|
98
106
|
|
|
99
|
-
def update_clob(
|
|
107
|
+
def update_clob(
|
|
108
|
+
data: JSON, clob_id: Optional[str] = None, ignore_nulls: Optional[bool] = True
|
|
109
|
+
) -> None:
|
|
100
110
|
"""Updates clob
|
|
101
111
|
|
|
102
112
|
Parameters
|
|
@@ -110,7 +120,7 @@ def update_clob(data: JSON, clob_id: str, ignore_nulls: Optional[bool] = True) -
|
|
|
110
120
|
"value": "string"
|
|
111
121
|
}
|
|
112
122
|
clob_id: string
|
|
113
|
-
Specifies the id of the clob to be deleted
|
|
123
|
+
Specifies the id of the clob to be deleted. Unused if "id" is present in JSON data.
|
|
114
124
|
ignore_nulls: Boolean
|
|
115
125
|
If true, null and empty fields in the provided clob will be ignored and the existing value of those fields left in place. Default: true
|
|
116
126
|
|
|
@@ -122,8 +132,19 @@ def update_clob(data: JSON, clob_id: str, ignore_nulls: Optional[bool] = True) -
|
|
|
122
132
|
if not isinstance(data, dict):
|
|
123
133
|
raise ValueError("Cannot store a Clob without a JSON data dictionary")
|
|
124
134
|
|
|
125
|
-
|
|
126
|
-
|
|
135
|
+
if "id" in data:
|
|
136
|
+
clob_id = data.get("id", "").upper()
|
|
137
|
+
|
|
138
|
+
if clob_id is None:
|
|
139
|
+
raise ValueError(f"Cannot update a Clob without an 'id' field:\n{STORE_DICT}")
|
|
140
|
+
|
|
141
|
+
params: dict[str, Any] = {}
|
|
142
|
+
if has_invalid_chars(clob_id):
|
|
143
|
+
endpoint = f"clobs/{IGNORED_ID}"
|
|
144
|
+
params["clob-id"] = clob_id
|
|
145
|
+
else:
|
|
146
|
+
endpoint = f"clobs/{clob_id}"
|
|
147
|
+
params["ignore-nulls"] = ignore_nulls
|
|
127
148
|
|
|
128
149
|
return api.patch(endpoint, data, params, api_version=1)
|
|
129
150
|
|
|
@@ -228,8 +228,21 @@ def combine_timeseries_results(results: List[Data]) -> Data:
|
|
|
228
228
|
combined_json["end"] = combined_df["date-time"].max().isoformat()
|
|
229
229
|
combined_json["total"] = len(combined_df)
|
|
230
230
|
|
|
231
|
+
combined_df["date-time"] = combined_df["date-time"].apply(
|
|
232
|
+
lambda x: int(pd.Timestamp(x).timestamp() * 1000)
|
|
233
|
+
)
|
|
234
|
+
combined_df["date-time"] = combined_df["date-time"].astype("Int64")
|
|
235
|
+
combined_df = combined_df.reindex(columns=["date-time", "value", "quality-code"])
|
|
236
|
+
|
|
237
|
+
# Replace NaN in value column with None so they serialize as JSON null
|
|
238
|
+
# rather than the invalid JSON literal NaN.
|
|
239
|
+
combined_df["value"] = (
|
|
240
|
+
combined_df["value"]
|
|
241
|
+
.astype(object)
|
|
242
|
+
.where(combined_df["value"].notna(), other=None)
|
|
243
|
+
)
|
|
231
244
|
# Update the "values" key in the JSON to include the combined data
|
|
232
|
-
combined_json["values"] = combined_df.
|
|
245
|
+
combined_json["values"] = combined_df.values.tolist()
|
|
233
246
|
|
|
234
247
|
# Return a new cwms Data object with the combined DataFrame and updated metadata
|
|
235
248
|
return Data(combined_json, selector="values")
|
|
@@ -433,8 +446,11 @@ def timeseries_df_to_json(
|
|
|
433
446
|
pd.Timestamp.isoformat
|
|
434
447
|
)
|
|
435
448
|
df = df.reindex(columns=["date-time", "value", "quality-code"])
|
|
436
|
-
|
|
437
|
-
|
|
449
|
+
|
|
450
|
+
# Replace NaN/NA/NaT in value column with None so they serialize as JSON
|
|
451
|
+
# null rather than the invalid JSON literal NaN.
|
|
452
|
+
df["value"] = df["value"].astype(object).where(df["value"].notna(), other=None)
|
|
453
|
+
|
|
438
454
|
if version_date:
|
|
439
455
|
version_date_iso = version_date.isoformat()
|
|
440
456
|
else:
|
|
@@ -453,14 +469,56 @@ def timeseries_df_to_json(
|
|
|
453
469
|
def store_multi_timeseries_df(
|
|
454
470
|
data: pd.DataFrame,
|
|
455
471
|
office_id: str,
|
|
472
|
+
create_as_ltrs: Optional[bool] = False,
|
|
473
|
+
store_rule: Optional[str] = None,
|
|
474
|
+
override_protection: Optional[bool] = False,
|
|
475
|
+
multithread: Optional[bool] = True,
|
|
456
476
|
max_workers: Optional[int] = 30,
|
|
457
477
|
) -> None:
|
|
478
|
+
"""stored mulitple timeseries from a dataframe. The dataframe must be a metled dataframe with columns
|
|
479
|
+
for date-time, value, quality-code(optional), ts_id, units, and version_date(optional). The dataframe will
|
|
480
|
+
be grouped by ts_id and version_date and each group will be posted as a separate timeseries using the store_timeseries
|
|
481
|
+
function. If version_date column is not included then all data will be stored as unversioned data. If version_date
|
|
482
|
+
column is included then data will be grouped by ts_id and version_date and stored as versioned timeseries with the
|
|
483
|
+
version date specified in the version_date column.
|
|
484
|
+
|
|
485
|
+
Parameters
|
|
486
|
+
----------
|
|
487
|
+
data: dataframe
|
|
488
|
+
Time Series data to be stored. Dataframe must be melted with columns for date-time, value, quality-code(optional),
|
|
489
|
+
ts_id, units, and version_date(optional).
|
|
490
|
+
date-time value quality-code ts_id units version_date
|
|
491
|
+
0 2023-12-20T14:45:00.000-05:00 93.1 0 OMA.Stage.Inst.6Hours.0.Fcst-MRBWM-GRFT ft 2024-04-22 07:00:00-05:00
|
|
492
|
+
1 2023-12-20T15:00:00.000-05:00 99.8 0 OMA.Stage.Inst.6Hours.0.Fcst-MRBWM-GRFT ft 2024-04-22 07:00:00-05:00
|
|
493
|
+
2 2023-12-20T15:15:00.000-05:00 98.5 0 OMA.Stage.Inst.6Hours.0.Fcst-MRBWM-GRFT ft 2024-04-22 07:15:00-05:00
|
|
494
|
+
office_id: string
|
|
495
|
+
The owning office of the time series(s).
|
|
496
|
+
create_as_ltrs: bool, optional, default is False
|
|
497
|
+
Flag indicating if timeseries should be created as Local Regular Time Series.
|
|
498
|
+
store_rule: str, optional, default is None:
|
|
499
|
+
The business rule to use when merging the incoming with existing data. Available values :
|
|
500
|
+
REPLACE_ALL,
|
|
501
|
+
DO_NOT_REPLACE,
|
|
502
|
+
REPLACE_MISSING_VALUES_ONLY,
|
|
503
|
+
REPLACE_WITH_NON_MISSING,
|
|
504
|
+
DELETE_INSERT.
|
|
505
|
+
override_protection: bool, optional, default is False
|
|
506
|
+
A flag to ignore the protected data quality flag when storing data.
|
|
507
|
+
multithread: bool, default is false
|
|
508
|
+
Specifies whether to store chunked time series values using multiple threads.
|
|
509
|
+
max_workers: Int, Optional, default is None
|
|
510
|
+
It is a number of Threads aka size of pool in concurrent.futures.ThreadPoolExecutor.
|
|
511
|
+
|
|
512
|
+
Returns
|
|
513
|
+
-------
|
|
514
|
+
None
|
|
515
|
+
"""
|
|
516
|
+
|
|
458
517
|
def store_ts_ids(
|
|
459
518
|
data: pd.DataFrame,
|
|
460
519
|
ts_id: str,
|
|
461
520
|
office_id: str,
|
|
462
521
|
version_date: Optional[datetime] = None,
|
|
463
|
-
multithread: bool = False,
|
|
464
522
|
) -> None:
|
|
465
523
|
try:
|
|
466
524
|
units = data["units"].iloc[0]
|
|
@@ -471,11 +529,23 @@ def store_multi_timeseries_df(
|
|
|
471
529
|
office_id=office_id,
|
|
472
530
|
version_date=version_date,
|
|
473
531
|
)
|
|
474
|
-
store_timeseries(
|
|
532
|
+
store_timeseries(
|
|
533
|
+
data=data_json,
|
|
534
|
+
create_as_ltrs=create_as_ltrs,
|
|
535
|
+
store_rule=store_rule,
|
|
536
|
+
override_protection=override_protection,
|
|
537
|
+
multithread=multithread,
|
|
538
|
+
)
|
|
475
539
|
except Exception as e:
|
|
476
540
|
print(f"Error processing {ts_id}: {e}")
|
|
477
541
|
return None
|
|
478
542
|
|
|
543
|
+
required_columns = ["date-time", "value", "ts_id", "units"]
|
|
544
|
+
for col in required_columns:
|
|
545
|
+
if col not in data.columns:
|
|
546
|
+
raise TypeError(
|
|
547
|
+
f"{col} is a required column in data when posting multiple timeseries from a dataframe. Make sure you are using a melted dataframe with columns for date-time, value, quality-code(optional), ts_id, units, and version_date(optional)."
|
|
548
|
+
)
|
|
479
549
|
ts_data_all = data.copy()
|
|
480
550
|
if "version_date" not in ts_data_all.columns:
|
|
481
551
|
ts_data_all = ts_data_all.assign(version_date=pd.to_datetime(pd.Series([])))
|
|
@@ -0,0 +1,203 @@
|
|
|
1
|
+
import json
|
|
2
|
+
from typing import Any, List, Optional
|
|
3
|
+
|
|
4
|
+
import cwms.api as api
|
|
5
|
+
from cwms.cwms_types import Data
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
def _raise_user_management_error(error: api.ApiError, action: str) -> None:
|
|
9
|
+
status_code = getattr(error.response, "status_code", None)
|
|
10
|
+
if status_code == 403:
|
|
11
|
+
response_hint = getattr(error.response, "reason", None) or "Forbidden"
|
|
12
|
+
message = (
|
|
13
|
+
f"{action} could not be completed because the current credentials "
|
|
14
|
+
"are not authorized for user-management access or are missing the "
|
|
15
|
+
f"required role assignment. CDA responded with 403 {response_hint}."
|
|
16
|
+
)
|
|
17
|
+
raise api.PermissionError(error.response, message) from None
|
|
18
|
+
raise error
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
def get_roles() -> List[str]:
|
|
22
|
+
"""Retrieve all available user-management roles."""
|
|
23
|
+
|
|
24
|
+
try:
|
|
25
|
+
response = api.get("roles", api_version=1)
|
|
26
|
+
except api.ApiError as error:
|
|
27
|
+
_raise_user_management_error(error, "User role lookup")
|
|
28
|
+
return list(response)
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
def get_user_profile() -> dict[str, Any]:
|
|
32
|
+
"""Retrieve the profile for the currently authenticated user."""
|
|
33
|
+
|
|
34
|
+
response = api.get("user/profile", api_version=1)
|
|
35
|
+
return dict(response)
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
def filter_users_by_office(data: dict[str, Any], office: str) -> dict[str, Any]:
|
|
39
|
+
"""
|
|
40
|
+
Filter users JSON to only include users that have roles for the specified office.
|
|
41
|
+
Each user's roles dict will only contain the entry for that office.
|
|
42
|
+
|
|
43
|
+
Args:
|
|
44
|
+
data: The full users JSON as a Python dict.
|
|
45
|
+
office: The office key to filter by (e.g., 'MVP', 'LRL').
|
|
46
|
+
|
|
47
|
+
Returns:
|
|
48
|
+
A new dict with the same structure, filtered to the specified office.
|
|
49
|
+
"""
|
|
50
|
+
filtered_users = []
|
|
51
|
+
|
|
52
|
+
for user in data.get("users", []):
|
|
53
|
+
roles = user.get("roles", {})
|
|
54
|
+
|
|
55
|
+
if office in roles:
|
|
56
|
+
# Build a copy of the user with only the target office's roles
|
|
57
|
+
filtered_user = {k: v for k, v in user.items() if k != "roles"}
|
|
58
|
+
filtered_user["roles"] = {office: roles[office]}
|
|
59
|
+
filtered_users.append(filtered_user)
|
|
60
|
+
|
|
61
|
+
return {
|
|
62
|
+
"page": data.get("page"),
|
|
63
|
+
"page-size": data.get("page-size"),
|
|
64
|
+
"total": len(filtered_users),
|
|
65
|
+
"users": filtered_users,
|
|
66
|
+
}
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
def get_users(
|
|
70
|
+
office_id: Optional[str] = None,
|
|
71
|
+
username_like: Optional[str] = None,
|
|
72
|
+
include_roles: Optional[bool] = None,
|
|
73
|
+
page: Optional[str] = None,
|
|
74
|
+
page_size: Optional[int] = 5000,
|
|
75
|
+
) -> Data:
|
|
76
|
+
"""Retrieve users with optional office and paging filters."""
|
|
77
|
+
|
|
78
|
+
endpoint = "users"
|
|
79
|
+
params = {
|
|
80
|
+
"office": office_id,
|
|
81
|
+
"username-like": username_like,
|
|
82
|
+
"include-roles": include_roles,
|
|
83
|
+
"page": page,
|
|
84
|
+
"page-size": page_size,
|
|
85
|
+
}
|
|
86
|
+
try:
|
|
87
|
+
response = api.get_with_paging(
|
|
88
|
+
endpoint=endpoint, selector="users", params=params, api_version=1
|
|
89
|
+
)
|
|
90
|
+
except api.ApiError as error:
|
|
91
|
+
_raise_user_management_error(error, "User list lookup")
|
|
92
|
+
|
|
93
|
+
# filter by office if office_id is provided since the API does not
|
|
94
|
+
# currently support filtering by office on the backend. This is a
|
|
95
|
+
# temporary workaround until the API supports office filtering.
|
|
96
|
+
if office_id:
|
|
97
|
+
response = filter_users_by_office(response, office_id)
|
|
98
|
+
return Data(response, selector="users")
|
|
99
|
+
|
|
100
|
+
|
|
101
|
+
def get_user(user_name: str) -> dict[str, Any]:
    """Look up a single user record by name.

    Raises
    ------
    ValueError
        If *user_name* is empty.
    api.NotFoundError
        When the API answers HTTP 404 for the user.
    """
    if not user_name:
        raise ValueError("Get user requires a user name")
    try:
        payload = api.get(f"users/{user_name}", api_version=1)
    except api.ApiError as error:
        # Map the two interesting HTTP statuses to friendlier errors and
        # let anything else propagate unchanged.
        status = getattr(error.response, "status_code", None)
        if status == 404:
            raise api.NotFoundError(
                error.response, f"User '{user_name}' was not found."
            ) from None
        if status == 403:
            _raise_user_management_error(error, f"User '{user_name}' retrieval")
        raise
    else:
        return dict(payload)
|
|
118
|
+
|
|
119
|
+
|
|
120
|
+
def store_user(user_name: str, office_id: str, roles: List[str]) -> None:
    """Assign roles to a user at an office.

    Notes
    -----
    The CDA User Management API creates/manages user access through role assignment
    at `/user/{user-name}/roles/{office-id}`.
    """
    # Guard clauses: every argument is required and must be non-empty.
    for value, what in (
        (user_name, "a user name"),
        (office_id, "an office id"),
        (roles, "a roles list"),
    ):
        if not value:
            raise ValueError(f"Store user requires {what}")

    try:
        api.post(f"user/{user_name}/roles/{office_id}", roles)
    except api.ApiError as error:
        _raise_user_management_error(
            error, f"User '{user_name}' role assignment update"
        )
|
|
143
|
+
|
|
144
|
+
|
|
145
|
+
def delete_user_roles(user_name: str, office_id: str, roles: List[str]) -> None:
    """Delete user role assignments for an office.

    Parameters
    ----------
    user_name : str
        User whose roles are removed (required, non-empty).
    office_id : str
        Office the role assignments belong to (required, non-empty).
    roles : List[str]
        Non-empty list of role names to delete.

    Raises
    ------
    ValueError
        If any argument is missing or empty.
    """
    if not user_name:
        raise ValueError("Delete user roles requires a user name")
    if not office_id:
        raise ValueError("Delete user roles requires an office id")
    # Reject an empty list as well as None: the sibling functions
    # (store_user, update_user) use the same truthiness check, the error
    # message already promises "a roles list", and deleting zero roles
    # would otherwise fire a pointless API call.
    if not roles:
        raise ValueError("Delete user roles requires a roles list")

    endpoint = f"user/{user_name}/roles/{office_id}"
    headers = {"accept": "*/*", "Content-Type": api.api_version_text(api.API_VERSION)}
    # TODO: Delete does not currently support a body in the api module. Use SESSION directly
    with api.SESSION.delete(
        endpoint, headers=headers, data=json.dumps(roles)
    ) as response:
        if not response.ok:
            _raise_user_management_error(
                api.ApiError(response), f"User '{user_name}' role deletion"
            )
|
|
165
|
+
|
|
166
|
+
|
|
167
|
+
def update_user(user_name: str, office_id: str, roles: List[str]) -> None:
    """Replace a user's role set at an office with the given roles.

    Fetches the user's current roles, deletes those not in *roles*, then
    posts any that are missing, so the final assignment matches *roles*.

    Raises
    ------
    ValueError
        If any argument is missing or empty.
    """
    if not user_name:
        raise ValueError("Update user requires a user name")
    if not office_id:
        raise ValueError("Update user requires an office id")
    if not roles:
        raise ValueError("Update user requires a roles list")

    endpoint = f"user/{user_name}/roles/{office_id}"
    record = get_user(user_name)

    # The record may carry roles either as a mapping of office -> roles or
    # as a flat list; normalize both shapes down to a plain list.
    raw_roles = record.get("roles")
    if isinstance(raw_roles, dict):
        current = raw_roles.get(office_id, [])
    elif isinstance(raw_roles, list):
        current = raw_roles
    else:
        current = []
    if not isinstance(current, list):
        current = []

    wanted = set(roles)
    have = set(current)
    # Set algebra gives the same additions/removals as the original
    # sorted-comprehension diff.
    to_remove = sorted(have - wanted)
    to_add = sorted(wanted - have)

    if to_remove:
        delete_user_roles(user_name, office_id, to_remove)
    if to_add:
        try:
            api.post(endpoint, to_add)
        except api.ApiError as error:
            _raise_user_management_error(error, f"User '{user_name}' role replacement")
|
|
@@ -8,3 +8,15 @@ def is_base64(s: str) -> bool:
|
|
|
8
8
|
return base64.b64encode(decoded).decode("utf-8") == s
|
|
9
9
|
except (ValueError, TypeError):
|
|
10
10
|
return False
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def has_invalid_chars(id: str) -> bool:
    """Return True if *id* contains a character that is unsafe in a URL path.

    The characters checked ("/", "\\", "&", "?", "=") are path and query
    delimiters that would corrupt an endpoint built by interpolating *id*
    into a URL.
    """
    INVALID_PATH_CHARS = ("/", "\\", "&", "?", "=")
    # any() short-circuits on the first offending character, matching the
    # original early-return loop.
    return any(char in id for char in INVALID_PATH_CHARS)
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|