cwms-python 0.7.1__tar.gz → 1.0.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40) hide show
  1. {cwms_python-0.7.1 → cwms_python-1.0.0}/PKG-INFO +4 -2
  2. {cwms_python-0.7.1 → cwms_python-1.0.0}/cwms/__init__.py +1 -0
  3. {cwms_python-0.7.1 → cwms_python-1.0.0}/cwms/api.py +76 -29
  4. cwms_python-1.0.0/cwms/catalog/blobs.py +148 -0
  5. {cwms_python-0.7.1 → cwms_python-1.0.0}/cwms/catalog/catalog.py +35 -1
  6. {cwms_python-0.7.1 → cwms_python-1.0.0}/cwms/cwms_types.py +41 -0
  7. {cwms_python-0.7.1 → cwms_python-1.0.0}/cwms/levels/location_levels.py +34 -5
  8. {cwms_python-0.7.1 → cwms_python-1.0.0}/cwms/locations/location_groups.py +85 -1
  9. {cwms_python-0.7.1 → cwms_python-1.0.0}/cwms/locations/physical_locations.py +7 -3
  10. {cwms_python-0.7.1 → cwms_python-1.0.0}/cwms/measurements/measurements.py +34 -2
  11. cwms_python-1.0.0/cwms/projects/water_supply/accounting.py +147 -0
  12. {cwms_python-0.7.1 → cwms_python-1.0.0}/cwms/ratings/ratings.py +219 -1
  13. {cwms_python-0.7.1 → cwms_python-1.0.0}/cwms/ratings/ratings_spec.py +16 -4
  14. {cwms_python-0.7.1 → cwms_python-1.0.0}/cwms/timeseries/timeseries.py +289 -17
  15. {cwms_python-0.7.1 → cwms_python-1.0.0}/cwms/timeseries/timeseries_group.py +1 -1
  16. {cwms_python-0.7.1 → cwms_python-1.0.0}/pyproject.toml +1 -1
  17. cwms_python-0.7.1/cwms/catalog/blobs.py +0 -99
  18. {cwms_python-0.7.1 → cwms_python-1.0.0}/LICENSE +0 -0
  19. {cwms_python-0.7.1 → cwms_python-1.0.0}/README.md +0 -0
  20. {cwms_python-0.7.1 → cwms_python-1.0.0}/cwms/catalog/clobs.py +0 -0
  21. {cwms_python-0.7.1 → cwms_python-1.0.0}/cwms/forecast/forecast_instance.py +0 -0
  22. {cwms_python-0.7.1 → cwms_python-1.0.0}/cwms/forecast/forecast_spec.py +0 -0
  23. {cwms_python-0.7.1 → cwms_python-1.0.0}/cwms/levels/specified_levels.py +0 -0
  24. {cwms_python-0.7.1 → cwms_python-1.0.0}/cwms/locations/gate_changes.py +0 -0
  25. {cwms_python-0.7.1 → cwms_python-1.0.0}/cwms/outlets/outlets.py +0 -0
  26. {cwms_python-0.7.1 → cwms_python-1.0.0}/cwms/outlets/virtual_outlets.py +0 -0
  27. {cwms_python-0.7.1 → cwms_python-1.0.0}/cwms/projects/project_lock_rights.py +0 -0
  28. {cwms_python-0.7.1 → cwms_python-1.0.0}/cwms/projects/project_locks.py +0 -0
  29. {cwms_python-0.7.1 → cwms_python-1.0.0}/cwms/projects/projects.py +0 -0
  30. {cwms_python-0.7.1 → cwms_python-1.0.0}/cwms/ratings/ratings_template.py +0 -0
  31. {cwms_python-0.7.1 → cwms_python-1.0.0}/cwms/standard_text/standard_text.py +0 -0
  32. {cwms_python-0.7.1 → cwms_python-1.0.0}/cwms/timeseries/timeseries_bin.py +0 -0
  33. {cwms_python-0.7.1 → cwms_python-1.0.0}/cwms/timeseries/timeseries_identifier.py +0 -0
  34. {cwms_python-0.7.1 → cwms_python-1.0.0}/cwms/timeseries/timeseries_profile.py +0 -0
  35. {cwms_python-0.7.1 → cwms_python-1.0.0}/cwms/timeseries/timeseries_profile_instance.py +0 -0
  36. {cwms_python-0.7.1 → cwms_python-1.0.0}/cwms/timeseries/timeseries_profile_parser.py +0 -0
  37. {cwms_python-0.7.1 → cwms_python-1.0.0}/cwms/timeseries/timeseries_txt.py +0 -0
  38. {cwms_python-0.7.1 → cwms_python-1.0.0}/cwms/turbines/turbines.py +0 -0
  39. {cwms_python-0.7.1 → cwms_python-1.0.0}/cwms/utils/__init__.py +0 -0
  40. {cwms_python-0.7.1 → cwms_python-1.0.0}/cwms/utils/checks.py +0 -0
@@ -1,8 +1,9 @@
1
- Metadata-Version: 2.3
1
+ Metadata-Version: 2.4
2
2
  Name: cwms-python
3
- Version: 0.7.1
3
+ Version: 1.0.0
4
4
  Summary: Corps water management systems (CWMS) REST API for Data Retrieval of USACE water data
5
5
  License: LICENSE
6
+ License-File: LICENSE
6
7
  Keywords: USACE,water data,CWMS
7
8
  Author: Eric Novotny
8
9
  Author-email: eric.v.novotny@usace.army.mil
@@ -14,6 +15,7 @@ Classifier: Programming Language :: Python :: 3.10
14
15
  Classifier: Programming Language :: Python :: 3.11
15
16
  Classifier: Programming Language :: Python :: 3.12
16
17
  Classifier: Programming Language :: Python :: 3.13
18
+ Classifier: Programming Language :: Python :: 3.14
17
19
  Requires-Dist: pandas (>=2.1.3,<3.0.0)
18
20
  Requires-Dist: requests (>=2.31.0,<3.0.0)
19
21
  Requires-Dist: requests-toolbelt (>=1.0.0,<2.0.0)
@@ -17,6 +17,7 @@ from cwms.outlets.virtual_outlets import *
17
17
  from cwms.projects.project_lock_rights import *
18
18
  from cwms.projects.project_locks import *
19
19
  from cwms.projects.projects import *
20
+ from cwms.projects.water_supply.accounting import *
20
21
  from cwms.ratings.ratings import *
21
22
  from cwms.ratings.ratings_spec import *
22
23
  from cwms.ratings.ratings_template import *
@@ -131,8 +131,9 @@ def init_session(
131
131
  """
132
132
 
133
133
  global SESSION
134
-
135
134
  if api_root:
135
+ # Ensure the API_ROOT ends with a single slash
136
+ api_root = api_root.rstrip("/") + "/"
136
137
  logging.debug(f"Initializing root URL: api_root={api_root}")
137
138
  SESSION = sessions.BaseUrlSession(base_url=api_root)
138
139
  adapter = adapters.HTTPAdapter(
@@ -142,8 +143,10 @@ def init_session(
142
143
  )
143
144
  SESSION.mount("https://", adapter)
144
145
  if api_key:
146
+ if api_key.startswith("apikey "):
147
+ api_key = api_key.replace("apikey ", "")
145
148
  logging.debug(f"Setting authorization key: api_key={api_key}")
146
- SESSION.headers.update({"Authorization": api_key})
149
+ SESSION.headers.update({"Authorization": "apikey " + api_key})
147
150
 
148
151
  return SESSION
149
152
 
@@ -212,6 +215,27 @@ def get_xml(
212
215
  return get(endpoint=endpoint, params=params, api_version=api_version)
213
216
 
214
217
 
218
+ def _process_response(response: Response) -> Any:
219
+ try:
220
+ # Avoid case sensitivity issues with the content type header
221
+ content_type = response.headers.get("Content-Type", "").lower()
222
+ # Most CDA content is JSON
223
+ if "application/json" in content_type or not content_type:
224
+ return cast(JSON, response.json())
225
+ # Use automatic charset detection with .text
226
+ if "text/plain" in content_type or "text/" in content_type:
227
+ return response.text
228
+ if content_type.startswith("image/"):
229
+ return base64.b64encode(response.content).decode("utf-8")
230
+ # Fallback for remaining content types
231
+ return response.content.decode("utf-8")
232
+ except JSONDecodeError as error:
233
+ logging.error(
234
+ f"Error decoding CDA response as JSON: {error} on line {error.lineno}\n\tFalling back to text"
235
+ )
236
+ return response.text
237
+
238
+
215
239
  def get(
216
240
  endpoint: str,
217
241
  params: Optional[RequestParams] = None,
@@ -240,24 +264,7 @@ def get(
240
264
  if not response.ok:
241
265
  logging.error(f"CDA Error: response={response}")
242
266
  raise ApiError(response)
243
- try:
244
- # Avoid case sensitivity issues with the content type header
245
- content_type = response.headers.get("Content-Type", "").lower()
246
- # Most CDA content is JSON
247
- if "application/json" in content_type or not content_type:
248
- return cast(JSON, response.json())
249
- # Use automatic charset detection with .text
250
- if "text/plain" in content_type or "text/" in content_type:
251
- return response.text
252
- if content_type.startswith("image/"):
253
- return base64.b64encode(response.content).decode("utf-8")
254
- # Fallback for remaining content types
255
- return response.content.decode("utf-8")
256
- except JSONDecodeError as error:
257
- logging.error(
258
- f"Error decoding CDA response as JSON: {error} on line {error.lineno}\n\tFalling back to text"
259
- )
260
- return response.text
267
+ return _process_response(response)
261
268
 
262
269
 
263
270
  def get_with_paging(
@@ -300,6 +307,25 @@ def get_with_paging(
300
307
  return response
301
308
 
302
309
 
310
+ def _post_function(
311
+ endpoint: str,
312
+ data: Any,
313
+ params: Optional[RequestParams] = None,
314
+ *,
315
+ api_version: int = API_VERSION,
316
+ ) -> Any:
317
+
318
+ # post requires different headers than get for
319
+ headers = {"accept": "*/*", "Content-Type": api_version_text(api_version)}
320
+ if isinstance(data, dict) or isinstance(data, list):
321
+ data = json.dumps(data)
322
+ with SESSION.post(endpoint, params=params, headers=headers, data=data) as response:
323
+ if not response.ok:
324
+ logging.error(f"CDA Error: response={response}")
325
+ raise ApiError(response)
326
+ return response
327
+
328
+
303
329
  def post(
304
330
  endpoint: str,
305
331
  data: Any,
@@ -319,22 +345,43 @@ def post(
319
345
  the default API_VERSION will be used.
320
346
 
321
347
  Returns:
322
- The deserialized JSON response data.
348
+ None
323
349
 
324
350
  Raises:
325
351
  ApiError: If an error response is returned by the API.
326
352
  """
353
+ _post_function(endpoint=endpoint, data=data, params=params, api_version=api_version)
327
354
 
328
- # post requires different headers than get for
329
- headers = {"accept": "*/*", "Content-Type": api_version_text(api_version)}
330
355
 
331
- if isinstance(data, dict) or isinstance(data, list):
332
- data = json.dumps(data)
356
+ def post_with_returned_data(
357
+ endpoint: str,
358
+ data: Any,
359
+ params: Optional[RequestParams] = None,
360
+ *,
361
+ api_version: int = API_VERSION,
362
+ ) -> Any:
363
+ """Make a POST request to the CWMS Data API.
333
364
 
334
- with SESSION.post(endpoint, params=params, headers=headers, data=data) as response:
335
- if not response.ok:
336
- logging.error(f"CDA Error: response={response}")
337
- raise ApiError(response)
365
+ Args:
366
+ endpoint: The CDA endpoint for the record type.
367
+ data: A dict containing the new record data. Must be JSON-serializable.
368
+ params (optional): Query parameters for the request.
369
+
370
+ Keyword Args:
371
+ api_version (optional): The CDA version to use for the request. If not specified,
372
+ the default API_VERSION will be used.
373
+
374
+ Returns:
375
+ The response data.
376
+
377
+ Raises:
378
+ ApiError: If an error response is returned by the API.
379
+ """
380
+
381
+ response = _post_function(
382
+ endpoint=endpoint, data=data, params=params, api_version=api_version
383
+ )
384
+ return _process_response(response)
338
385
 
339
386
 
340
387
  def patch(
@@ -0,0 +1,148 @@
1
+ import base64
2
+ from typing import Optional
3
+
4
+ import cwms.api as api
5
+ from cwms.cwms_types import JSON, Data
6
+ from cwms.utils.checks import is_base64
7
+
8
+ STORE_DICT = """data = {
9
+ "office-id": "SWT",
10
+ "id": "MYFILE_OR_BLOB_ID.TXT",
11
+ "description": "Your description here",
12
+ "media-type-id": "text/plain",
13
+ "value": "STRING of content or BASE64_ENCODED_STRING"
14
+ }
15
+ """
16
+
17
+
18
+ def get_blob(blob_id: str, office_id: str) -> str:
19
+ """Get a single BLOB (Binary Large Object).
20
+
21
+ Parameters
22
+ blob_id: string
23
+ Specifies the id of the blob. ALL blob ids are UPPERCASE.
24
+ office_id: string
25
+ Specifies the office of the blob.
26
+
27
+
28
+ Returns
29
+ str: the value returned based on the content-type it was stored with as a string
30
+ """
31
+
32
+ endpoint = f"blobs/{blob_id}"
33
+ params = {"office": office_id}
34
+ response = api.get(endpoint, params, api_version=1)
35
+ return str(response)
36
+
37
+
38
+ def get_blobs(
39
+ office_id: Optional[str] = None,
40
+ page_size: Optional[int] = 100,
41
+ blob_id_like: Optional[str] = None,
42
+ ) -> Data:
43
+ """Get a subset of Blobs
44
+
45
+ Parameters:
46
+ office_id: Optional[string]
47
+ Specifies the office of the blob.
48
+ page_size: Optional[Integer]
49
+ How many entries per page returned. Default 100.
50
+ blob_id_like: Optional[string]
51
+ Posix regular expression matching against the clob id
52
+
53
+ Returns:
54
+ cwms data type. data.json will return the JSON output and data.df will return a dataframe
55
+ """
56
+
57
+ endpoint = "blobs"
58
+ params = {"office": office_id, "page-size": page_size, "like": blob_id_like}
59
+
60
+ response = api.get(endpoint, params, api_version=2)
61
+ return Data(response, selector="blobs")
62
+
63
+
64
+ def store_blobs(data: JSON, fail_if_exists: Optional[bool] = True) -> None:
65
+ """Create New Blob
66
+
67
+ Parameters:
68
+ **Note**: The "id" field is automatically cast to uppercase.
69
+
70
+ Data: JSON dictionary
71
+ JSON containing information of Blob to be updated.
72
+
73
+ fail_if_exists: Boolean
74
+ Create will fail if the provided ID already exists. Default: True
75
+
76
+ Returns:
77
+ None
78
+ """
79
+
80
+ if not isinstance(data, dict):
81
+ raise ValueError(
82
+ f"Cannot store a Blob without a JSON data dictionary:\n{STORE_DICT}"
83
+ )
84
+
85
+ # Encode value if it's not already Base64-encoded
86
+ if "value" in data and not is_base64(data["value"]):
87
+ # Encode to bytes, then Base64, then decode to string for storing
88
+ data["value"] = base64.b64encode(data["value"].encode("utf-8")).decode("utf-8")
89
+
90
+ endpoint = "blobs"
91
+ params = {"fail-if-exists": fail_if_exists}
92
+ return api.post(endpoint, data, params, api_version=1)
93
+
94
+
95
+ def delete_blob(blob_id: str, office_id: str) -> None:
96
+ """Delete a single BLOB.
97
+
98
+ Parameters
99
+ ----------
100
+ blob_id: string
101
+ Specifies the id of the blob. ALL blob ids are UPPERCASE.
102
+ office_id: string
103
+ Specifies the office of the blob.
104
+
105
+ Returns
106
+ -------
107
+ None
108
+ """
109
+
110
+ endpoint = f"blobs/{blob_id}"
111
+ params = {"office": office_id}
112
+ return api.delete(endpoint, params, api_version=1)
113
+
114
+
115
+ def update_blob(data: JSON, fail_if_not_exists: Optional[bool] = True) -> None:
116
+ """Update Existing Blob
117
+
118
+ Parameters:
119
+ **Note**: The "id" field is automatically cast to uppercase.
120
+
121
+ Data: JSON dictionary
122
+ JSON containing information of Blob to be updated.
123
+
124
+ fail_if_not_exists: Boolean
125
+ Update will fail if the provided ID does not already exist. Default: True
126
+
127
+ Returns:
128
+ None
129
+ """
130
+
131
+ if not data:
132
+ raise ValueError(
133
+ f"Cannot update a Blob without a JSON data dictionary:\n{STORE_DICT}"
134
+ )
135
+
136
+ if "id" not in data:
137
+ raise ValueError(f"Cannot update a Blob without an 'id' field:\n{STORE_DICT}")
138
+
139
+ # Encode value if it's not already Base64-encoded
140
+ if "value" in data and not is_base64(data["value"]):
141
+ # Encode to bytes, then Base64, then decode to string for storing
142
+ data["value"] = base64.b64encode(data["value"].encode("utf-8")).decode("utf-8")
143
+
144
+ blob_id = data.get("id", "").upper()
145
+
146
+ endpoint = f"blobs/{blob_id}"
147
+ params = {"fail-if-not-exists": fail_if_not_exists}
148
+ return api.patch(endpoint, data, params, api_version=1)
@@ -1,4 +1,7 @@
1
- from typing import Optional
1
+ from datetime import datetime
2
+ from typing import Optional, Tuple
3
+
4
+ import pandas as pd
2
5
 
3
6
  import cwms.api as api
4
7
  from cwms.cwms_types import Data
@@ -130,3 +133,34 @@ def get_timeseries_catalog(
130
133
 
131
134
  response = api.get(endpoint=endpoint, params=params, api_version=2)
132
135
  return Data(response, selector="entries")
136
+
137
+
138
+ def get_ts_extents(ts_id: str, office_id: str) -> Tuple[datetime, datetime, datetime]:
139
+ """Retrieves earliest extent, latest extent, and last update via cwms.get_timeseries_catalog
140
+
141
+ Parameters
142
+ ----------
143
+ ts_id: string
144
+ Timeseries id to query.
145
+ office_id: string
146
+ The owning office of the timeseries group.
147
+
148
+ Returns
149
+ -------
150
+ tuple of datetime objects (earliest_time, latest_time, last_update)
151
+ """
152
+ cwms_cat = get_timeseries_catalog(
153
+ office_id=office_id,
154
+ like=ts_id,
155
+ timeseries_group_like=None,
156
+ page_size=500,
157
+ include_extents=True,
158
+ ).df
159
+
160
+ times = cwms_cat[cwms_cat.name == ts_id].extents.values[0][0]
161
+
162
+ earliest_time = pd.to_datetime(times["earliest-time"])
163
+ latest_time = pd.to_datetime(times["latest-time"])
164
+ last_update = pd.to_datetime(times["last-update"])
165
+
166
+ return earliest_time, latest_time, last_update
@@ -79,6 +79,44 @@ class Data:
79
79
  df["date-time"] = to_datetime(df["date-time"], unit="ms", utc=True)
80
80
  return df
81
81
 
82
+ def reorder_measurement_cols(df: DataFrame) -> DataFrame:
83
+ # reorders measurement columns for usability
84
+
85
+ # Define the columns to bring to the front
86
+ front_columns = [
87
+ "id.office-id",
88
+ "id.name",
89
+ "number",
90
+ "instant",
91
+ "streamflow-measurement.gage-height",
92
+ "streamflow-measurement.flow",
93
+ "streamflow-measurement.quality",
94
+ "used",
95
+ "agency",
96
+ "wm-comments",
97
+ ]
98
+
99
+ # Identify columns containing 'unit' to be last
100
+ unit_columns = [col for col in df.columns if "unit" in col]
101
+
102
+ # Identify remaining columns (not in front_columns or unit_columns)
103
+ remaining_columns = [
104
+ col
105
+ for col in df.columns
106
+ if col not in front_columns and col not in unit_columns
107
+ ]
108
+
109
+ # Construct the new column order
110
+ new_column_order = front_columns + remaining_columns + unit_columns
111
+
112
+ # Filter out columns that might not actually exist in the DataFrame.
113
+ existing_columns = [col for col in new_column_order if col in df.columns]
114
+
115
+ # Reorder the DataFrame
116
+ df = df[existing_columns]
117
+
118
+ return df
119
+
82
120
  data = deepcopy(json)
83
121
 
84
122
  if selector:
@@ -95,6 +133,9 @@ class Data:
95
133
  df = json_normalize(df_data) if df_data else DataFrame()
96
134
  else:
97
135
  df = json_normalize(data)
136
+ # if streamflow-measurement reorder columns
137
+ if "streamflow-measurement.flow" in df.columns:
138
+ df = reorder_measurement_cols(df)
98
139
 
99
140
  return df
100
141
 
@@ -13,7 +13,7 @@ from cwms.cwms_types import JSON, Data
13
13
 
14
14
 
15
15
  def get_location_levels(
16
- level_id_mask: str = "*",
16
+ level_id_mask: Optional[str] = None,
17
17
  office_id: Optional[str] = None,
18
18
  unit: Optional[str] = None,
19
19
  datum: Optional[str] = None,
@@ -58,13 +58,13 @@ def get_location_levels(
58
58
  "level-id-mask": level_id_mask,
59
59
  "unit": unit,
60
60
  "datum": datum,
61
- "begin": begin.isoformat() if begin else "",
62
- "end": end.isoformat() if end else "",
61
+ "begin": begin.isoformat() if begin else None,
62
+ "end": end.isoformat() if end else None,
63
63
  "page": page,
64
64
  "page-size": page_size,
65
65
  }
66
- response = api.get(endpoint, params)
67
- return Data(response)
66
+ response = api.get(endpoint=endpoint, params=params)
67
+ return Data(json=response, selector="levels")
68
68
 
69
69
 
70
70
  def get_location_level(
@@ -169,6 +169,35 @@ def delete_location_level(
169
169
  return api.delete(endpoint, params)
170
170
 
171
171
 
172
+ def update_location_level(
173
+ data: JSON, level_id: str, effective_date: Optional[datetime] = None
174
+ ) -> None:
175
+ """
176
+ Parameters
177
+ ----------
178
+ data : dict
179
+ The JSON data dictionary containing the updated location level information.
180
+ level_id : str
181
+ The ID of the location level to be updated.
182
+ effective_date : datetime, optional
183
+ The effective date of the location level to be updated.
184
+ If the datetime has a timezone it will be used, otherwise it is assumed to be in UTC.
185
+
186
+ """
187
+ if data is None:
188
+ raise ValueError(
189
+ "Cannot update a location level without a JSON data dictionary"
190
+ )
191
+ if level_id is None:
192
+ raise ValueError("Cannot update a location level without an id")
193
+ endpoint = f"levels/{level_id}"
194
+
195
+ params = {
196
+ "effective-date": (effective_date.isoformat() if effective_date else None),
197
+ }
198
+ return api.patch(endpoint, data, params)
199
+
200
+
172
201
  def get_level_as_timeseries(
173
202
  location_level_id: str,
174
203
  office_id: str,
@@ -85,6 +85,84 @@ def get_location_groups(
85
85
  return Data(response)
86
86
 
87
87
 
88
+ def location_group_df_to_json(
89
+ data: pd.DataFrame,
90
+ group_id: str,
91
+ group_office_id: str,
92
+ category_office_id: str,
93
+ category_id: str,
94
+ ) -> JSON:
95
+ """
96
+ Converts a dataframe to a json dictionary in the correct format.
97
+
98
+ Parameters
99
+ ----------
100
+ data: pd.DataFrame
101
+ Dataframe containing timeseries information.
102
+ group_id: str
103
+ The group ID for the timeseries.
104
+ office_id: str
105
+ The ID of the office associated with the specified timeseries.
106
+ category_id: str
107
+ The ID of the category associated with the group
108
+
109
+ Returns
110
+ -------
111
+ JSON
112
+ JSON dictionary of the timeseries data.
113
+ """
114
+ df = data.copy()
115
+ required_columns = ["office-id", "location-id"]
116
+ optional_columns = ["alias-id", "attribute", "ref-location-id"]
117
+ for column in required_columns:
118
+ if column not in df.columns:
119
+ raise TypeError(
120
+ f"{column} is a required column in data when posting as a dataframe"
121
+ )
122
+
123
+ if df[required_columns].isnull().any().any():
124
+ raise ValueError(
125
+ f"Null/NaN values found in required columns: {required_columns}. "
126
+ )
127
+
128
+ # Fill optional columns with default values if missing
129
+ if "alias-id" not in df.columns:
130
+ df["alias-id"] = None
131
+ if "attribute" not in df.columns:
132
+ df["attribute"] = 0
133
+
134
+ # Replace NaN with None for optional columns
135
+ for column in optional_columns:
136
+ if column in df.columns:
137
+ df[column] = df[column].where(pd.notnull(df[column]), None)
138
+
139
+ # Build the list of time-series entries
140
+ assigned_locs = df.apply(
141
+ lambda entry: {
142
+ "office-id": entry["office-id"],
143
+ "location-id": entry["location-id"],
144
+ "alias-id": entry["alias-id"],
145
+ "attribute": entry["attribute"],
146
+ **(
147
+ {"ref-location-id": entry["ref-location-id"]}
148
+ if "ref-location-id" in entry and pd.notna(entry["ref-location-id"])
149
+ else {}
150
+ ),
151
+ },
152
+ axis=1,
153
+ ).tolist()
154
+
155
+ # Construct the final JSON dictionary
156
+ json_dict = {
157
+ "office-id": group_office_id,
158
+ "id": group_id,
159
+ "location-category": {"office-id": category_office_id, "id": category_id},
160
+ "assigned-locations": assigned_locs,
161
+ }
162
+
163
+ return json_dict
164
+
165
+
88
166
  def store_location_groups(data: JSON) -> None:
89
167
  """
90
168
  Create new Location Group
@@ -140,7 +218,12 @@ def update_location_group(
140
218
  api.patch(endpoint=endpoint, data=data, params=params, api_version=1)
141
219
 
142
220
 
143
- def delete_location_group(group_id: str, category_id: str, office_id: str) -> None:
221
+ def delete_location_group(
222
+ group_id: str,
223
+ category_id: str,
224
+ office_id: str,
225
+ cascade_delete: Optional[bool] = False,
226
+ ) -> None:
144
227
  """Deletes requested time series group
145
228
 
146
229
  Parameters
@@ -161,6 +244,7 @@ def delete_location_group(group_id: str, category_id: str, office_id: str) -> No
161
244
  params = {
162
245
  "office": office_id,
163
246
  "category-id": category_id,
247
+ "cascade-delete": cascade_delete,
164
248
  }
165
249
 
166
250
  return api.delete(endpoint, params=params, api_version=1)
@@ -128,7 +128,7 @@ def delete_location(
128
128
  return api.delete(endpoint, params=params)
129
129
 
130
130
 
131
- def store_location(data: JSON) -> None:
131
+ def store_location(data: JSON, fail_if_exists: bool = True) -> None:
132
132
  """
133
133
  This method is used to store and update location's data through CWMS Data API.
134
134
 
@@ -137,6 +137,10 @@ def store_location(data: JSON) -> None:
137
137
  data : dict
138
138
  A dictionary representing the JSON data to be stored.
139
139
  If the `data` value is None, a `ValueError` will be raised.
140
+ fail_if_exists : bool, optional
141
+ A boolean value indicating whether to fail if the outlet already exists.
142
+ Default is True.
143
+
140
144
 
141
145
  Returns
142
146
  -------
@@ -148,8 +152,8 @@ def store_location(data: JSON) -> None:
148
152
  raise ValueError("Storing location requires a JSON data dictionary")
149
153
 
150
154
  endpoint = "locations"
151
-
152
- return api.post(endpoint, data)
155
+ params = {"fail-if-exists": fail_if_exists}
156
+ return api.post(endpoint, data, params=params)
153
157
 
154
158
 
155
159
  def update_location(location_id: str, data: JSON) -> None:
@@ -116,8 +116,15 @@ def store_measurements(
116
116
  "fail-if-exists": fail_if_exists,
117
117
  }
118
118
 
119
- if not isinstance(data, dict):
120
- raise ValueError("Cannot store a timeseries without a JSON data dictionary")
119
+ if not isinstance(data, list):
120
+ raise ValueError(
121
+ "Cannot store a measurement without a JSON list, object is not a list of dictionaries"
122
+ )
123
+ for item in data:
124
+ if not isinstance(item, dict):
125
+ raise ValueError(
126
+ "Cannot store a measurement without a JSON list: a non-dictionary object was found"
127
+ )
121
128
 
122
129
  return api.post(endpoint, data, params, api_version=1)
123
130
 
@@ -175,3 +182,28 @@ def delete_measurements(
175
182
  }
176
183
 
177
184
  return api.delete(endpoint, params, api_version=1)
185
+
186
+
187
+ def get_measurements_extents(
188
+ office_mask: Optional[str] = None,
189
+ ) -> Data:
190
+ """Get time extents of streamflow measurements
191
+
192
+ Parameters
193
+ ----------
194
+ office_mask: string
195
+ Office Id used to filter the results.
196
+
197
+ Returns
198
+ -------
199
+ cwms data type. data.json will return the JSON output and data.df will return a dataframe. Dates returned are all in UTC.
200
+
201
+ """
202
+ endpoint = "measurements/time-extents"
203
+
204
+ params = {
205
+ "office-mask": office_mask,
206
+ }
207
+
208
+ response = api.get(endpoint, params, api_version=1)
209
+ return Data(response) # , selector=selector)