sfeos_helpers-5.0.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,469 @@
+ """Client implementation for the STAC API Aggregation Extension."""
+
+ from typing import Annotated, Any, Dict, List, Optional, Union
+ from urllib.parse import unquote_plus, urljoin
+
+ import attr
+ import orjson
+ from fastapi import HTTPException, Path, Request
+ from pygeofilter.backends.cql2_json import to_cql2
+ from pygeofilter.parsers.cql2_text import parse as parse_cql2_text
+ from stac_pydantic.shared import BBox
+
+ from stac_fastapi.core.base_database_logic import BaseDatabaseLogic
+ from stac_fastapi.core.base_settings import ApiBaseSettings
+ from stac_fastapi.core.datetime_utils import format_datetime_range
+ from stac_fastapi.core.extensions.aggregation import EsAggregationExtensionPostRequest
+ from stac_fastapi.core.session import Session
+ from stac_fastapi.extensions.core.aggregation.client import AsyncBaseAggregationClient
+ from stac_fastapi.extensions.core.aggregation.types import (
+     Aggregation,
+     AggregationCollection,
+ )
+ from stac_fastapi.types.rfc3339 import DateTimeType
+
+ from .format import frequency_agg, metric_agg
+
+
+ @attr.s
+ class EsAsyncBaseAggregationClient(AsyncBaseAggregationClient):
+     """Defines a pattern for implementing the STAC aggregation extension with Elasticsearch/OpenSearch."""
+
+     database: BaseDatabaseLogic = attr.ib()
+     settings: ApiBaseSettings = attr.ib()
+     session: Session = attr.ib(default=attr.Factory(Session.create_from_env))
+
+     # Default aggregations to use if none are specified
+     DEFAULT_AGGREGATIONS = [
+         {"name": "total_count", "data_type": "integer"},
+         {"name": "datetime_max", "data_type": "datetime"},
+         {"name": "datetime_min", "data_type": "datetime"},
+         {
+             "name": "datetime_frequency",
+             "data_type": "frequency_distribution",
+             "frequency_distribution_data_type": "datetime",
+         },
+         {
+             "name": "collection_frequency",
+             "data_type": "frequency_distribution",
+             "frequency_distribution_data_type": "string",
+         },
+         {
+             "name": "geometry_geohash_grid_frequency",
+             "data_type": "frequency_distribution",
+             "frequency_distribution_data_type": "string",
+         },
+         {
+             "name": "geometry_geotile_grid_frequency",
+             "data_type": "frequency_distribution",
+             "frequency_distribution_data_type": "string",
+         },
+     ]
+
+     # Geo point aggregations
+     GEO_POINT_AGGREGATIONS = [
+         {
+             "name": "grid_code_frequency",
+             "data_type": "frequency_distribution",
+             "frequency_distribution_data_type": "string",
+         },
+     ]
+
+     # Supported datetime intervals
+     SUPPORTED_DATETIME_INTERVAL = [
+         "year",
+         "quarter",
+         "month",
+         "week",
+         "day",
+         "hour",
+         "minute",
+         "second",
+     ]
+
+     # Default datetime interval
+     DEFAULT_DATETIME_INTERVAL = "month"
+
+     # Maximum precision values
+     MAX_GEOHASH_PRECISION = 12
+     MAX_GEOHEX_PRECISION = 15
+     MAX_GEOTILE_PRECISION = 29
+
+     async def get_aggregations(
+         self, collection_id: Optional[str] = None, **kwargs
+     ) -> Dict[str, Any]:
+         """Get the available aggregations for a catalog or collection defined in the STAC JSON.
+
+         If no aggregations are defined, default aggregations are used.
+
+         Args:
+             collection_id: Optional collection ID to get aggregations for
+             **kwargs: Additional keyword arguments
+
+         Returns:
+             Dict[str, Any]: A dictionary containing the available aggregations
+         """
+         request: Request = kwargs.get("request")
+         base_url = str(request.base_url) if request else ""
+         links = [{"rel": "root", "type": "application/json", "href": base_url}]
+
+         if collection_id is not None:
+             collection_endpoint = urljoin(base_url, f"collections/{collection_id}")
+             links.extend(
+                 [
+                     {
+                         "rel": "collection",
+                         "type": "application/json",
+                         "href": collection_endpoint,
+                     },
+                     {
+                         "rel": "self",
+                         "type": "application/json",
+                         "href": urljoin(collection_endpoint + "/", "aggregations"),
+                     },
+                 ]
+             )
+             if await self.database.check_collection_exists(collection_id) is None:
+                 collection = await self.database.find_collection(collection_id)
+                 aggregations = collection.get(
+                     "aggregations", self.DEFAULT_AGGREGATIONS.copy()
+                 )
+             else:
+                 raise IndexError(f"Collection {collection_id} does not exist")
+         else:
+             links.append(
+                 {
+                     "rel": "self",
+                     "type": "application/json",
+                     "href": urljoin(base_url, "aggregations"),
+                 }
+             )
+             aggregations = self.DEFAULT_AGGREGATIONS.copy()
+
+         return {
+             "type": "AggregationCollection",
+             "aggregations": aggregations,
+             "links": links,
+         }
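
For orientation, the catalog-level return value (no collection_id supplied) has roughly the following shape, with the aggregation list coming straight from DEFAULT_AGGREGATIONS; the base URL here is a made-up example:

    {
        "type": "AggregationCollection",
        "aggregations": [
            {"name": "total_count", "data_type": "integer"},
            {"name": "datetime_max", "data_type": "datetime"},
            # ...the remaining DEFAULT_AGGREGATIONS entries
        ],
        "links": [
            {"rel": "root", "type": "application/json", "href": "https://stac.example.com/"},
            {"rel": "self", "type": "application/json", "href": "https://stac.example.com/aggregations"},
        ],
    }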
+
+     def extract_precision(
+         self, precision: Union[int, None], min_value: int, max_value: int
+     ) -> int:
+         """Ensure that the aggregation precision value is within a valid range.
+
+         Args:
+             precision: The precision value to validate
+             min_value: The minimum allowed precision value
+             max_value: The maximum allowed precision value
+
+         Returns:
+             int: The validated precision value, or min_value if no precision was supplied
+
+         Raises:
+             HTTPException: If the precision is outside the valid range
+         """
+         if precision is None:
+             return min_value
+         if precision < min_value or precision > max_value:
+             raise HTTPException(
+                 status_code=400,
+                 detail=f"Invalid precision value. Must be between {min_value} and {max_value}",
+             )
+         return precision
+
+     def extract_date_histogram_interval(self, value: Optional[str]) -> str:
+         """Ensure that the interval for the date histogram is valid.
+
+         If no value is provided, the default will be returned.
+
+         Args:
+             value: The interval value to validate
+
+         Returns:
+             str: A validated date histogram interval
+
+         Raises:
+             HTTPException: If the supplied value is not in the supported intervals
+         """
+         if value is None:
+             return self.DEFAULT_DATETIME_INTERVAL
+         if value not in self.SUPPORTED_DATETIME_INTERVAL:
+             raise HTTPException(
+                 status_code=400,
+                 detail=f"Invalid datetime interval. Must be one of {self.SUPPORTED_DATETIME_INTERVAL}",
+             )
+         return value
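
A quick sketch of how the two validators behave; the results follow directly from the methods above, and "client" stands for an already-constructed EsAsyncBaseAggregationClient:

    # "client" is assumed to be an EsAsyncBaseAggregationClient instance
    assert client.extract_precision(None, 1, client.MAX_GEOHASH_PRECISION) == 1  # None falls back to the minimum
    assert client.extract_precision(8, 1, client.MAX_GEOHASH_PRECISION) == 8     # in range, returned unchanged
    assert client.extract_date_histogram_interval(None) == "month"               # DEFAULT_DATETIME_INTERVAL
    # client.extract_precision(99, 1, client.MAX_GEOHASH_PRECISION)  -> raises HTTPException(400)
    # client.extract_date_histogram_interval("fortnight")            -> raises HTTPException(400)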
+
+     def get_filter(self, filter, filter_lang):
+         """Format the filter parameter in cql2-json or cql2-text.
+
+         Args:
+             filter: The filter expression
+             filter_lang: The filter language (cql2-json or cql2-text)
+
+         Returns:
+             dict: A formatted filter expression
+
+         Raises:
+             HTTPException: If the filter language is not supported
+         """
+         if filter_lang == "cql2-text":
+             return orjson.loads(to_cql2(parse_cql2_text(filter)))
+         elif filter_lang == "cql2-json":
+             if isinstance(filter, str):
+                 return orjson.loads(unquote_plus(filter))
+             else:
+                 return filter
+         else:
+             raise HTTPException(
+                 status_code=400,
+                 detail=f"Unknown filter-lang: {filter_lang}. Only cql2-json or cql2-text are supported.",
+             )
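
To illustrate the cql2-text branch: pygeofilter parses the text expression into an AST, to_cql2 serializes that AST as a cql2-json string, and orjson decodes it into a dict. A sketch using the same imports as the module above; the exact operator tree is whatever pygeofilter emits:

    import orjson
    from pygeofilter.backends.cql2_json import to_cql2
    from pygeofilter.parsers.cql2_text import parse as parse_cql2_text

    ast = parse_cql2_text("cloud_cover <= 20")
    cql2_json = orjson.loads(to_cql2(ast))
    # roughly {"op": "<=", "args": [{"property": "cloud_cover"}, 20]}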
+
+     async def aggregate(
+         self,
+         aggregate_request: Optional[EsAggregationExtensionPostRequest] = None,
+         collection_id: Optional[
+             Annotated[str, Path(description="Collection ID")]
+         ] = None,
+         collections: Optional[List[str]] = [],
+         datetime: Optional[DateTimeType] = None,
+         intersects: Optional[str] = None,
+         filter_lang: Optional[str] = None,
+         filter_expr: Optional[str] = None,
+         aggregations: Optional[str] = None,
+         ids: Optional[List[str]] = None,
+         bbox: Optional[BBox] = None,
+         centroid_geohash_grid_frequency_precision: Optional[int] = None,
+         centroid_geohex_grid_frequency_precision: Optional[int] = None,
+         centroid_geotile_grid_frequency_precision: Optional[int] = None,
+         geometry_geohash_grid_frequency_precision: Optional[int] = None,
+         geometry_geotile_grid_frequency_precision: Optional[int] = None,
+         datetime_frequency_interval: Optional[str] = None,
+         **kwargs,
+     ) -> Union[Dict, Exception]:
+         """Get aggregations from the database."""
+         request: Request = kwargs["request"]
+         base_url = str(request.base_url)
+         path = request.url.path
+         search = self.database.make_search()
+
+         if aggregate_request is None:
+
+             base_args = {
+                 "collections": collections,
+                 "ids": ids,
+                 "bbox": bbox,
+                 "aggregations": aggregations,
+                 "centroid_geohash_grid_frequency_precision": centroid_geohash_grid_frequency_precision,
+                 "centroid_geohex_grid_frequency_precision": centroid_geohex_grid_frequency_precision,
+                 "centroid_geotile_grid_frequency_precision": centroid_geotile_grid_frequency_precision,
+                 "geometry_geohash_grid_frequency_precision": geometry_geohash_grid_frequency_precision,
+                 "geometry_geotile_grid_frequency_precision": geometry_geotile_grid_frequency_precision,
+                 "datetime_frequency_interval": datetime_frequency_interval,
+             }
+
+             if collection_id:
+                 collections = [str(collection_id)]
+
+             if intersects:
+                 base_args["intersects"] = orjson.loads(unquote_plus(intersects))
+
+             if datetime:
+                 base_args["datetime"] = format_datetime_range(datetime)
+
+             if filter_expr:
+                 base_args["filter"] = self.get_filter(filter_expr, filter_lang)
+             aggregate_request = EsAggregationExtensionPostRequest(**base_args)
+         else:
+             # Workaround for optional path param in POST requests
+             if "collections" in path:
+                 collection_id = path.split("/")[2]
+
+             filter_lang = "cql2-json"
+             if aggregate_request.filter_expr:
+                 aggregate_request.filter_expr = self.get_filter(
+                     aggregate_request.filter_expr, filter_lang
+                 )
+
+         if collection_id:
+             if aggregate_request.collections:
+                 raise HTTPException(
+                     status_code=400,
+                     detail="Cannot query multiple collections when executing '/collections/<collection_id>/aggregate'. Use '/aggregate' and the collections field instead",
+                 )
+             else:
+                 aggregate_request.collections = [collection_id]
+
+         if (
+             aggregate_request.aggregations is None
+             or aggregate_request.aggregations == []
+         ):
+             raise HTTPException(
+                 status_code=400,
+                 detail="No 'aggregations' found. Use '/aggregations' to return available aggregations",
+             )
+
+         if aggregate_request.ids:
+             search = self.database.apply_ids_filter(
+                 search=search, item_ids=aggregate_request.ids
+             )
+
+         if aggregate_request.datetime:
+             search = self.database.apply_datetime_filter(
+                 search=search, interval=aggregate_request.datetime
+             )
+
+         if aggregate_request.bbox:
+             bbox = aggregate_request.bbox
+             if len(bbox) == 6:
+                 # Drop the elevation values from a 3D bbox to get a 2D bbox
+                 bbox = [bbox[0], bbox[1], bbox[3], bbox[4]]
+
+             search = self.database.apply_bbox_filter(search=search, bbox=bbox)
+
+         if aggregate_request.intersects:
+             search = self.database.apply_intersects_filter(
+                 search=search, intersects=aggregate_request.intersects
+             )
+
+         if aggregate_request.collections:
+             search = self.database.apply_collections_filter(
+                 search=search, collection_ids=aggregate_request.collections
+             )
+             # Validate that the requested aggregations are supported by every collection
+             for collection_id in aggregate_request.collections:
+                 aggregation_info = await self.get_aggregations(
+                     collection_id=collection_id, request=request
+                 )
+                 supported_aggregations = (
+                     aggregation_info["aggregations"] + self.DEFAULT_AGGREGATIONS
+                 )
+
+                 for agg_name in aggregate_request.aggregations:
+                     if agg_name not in set([x["name"] for x in supported_aggregations]):
+                         raise HTTPException(
+                             status_code=400,
+                             detail=f"Aggregation {agg_name} not supported by collection {collection_id}",
+                         )
+         else:
+             # Validate that the requested aggregations are supported by the catalog
+             aggregation_info = await self.get_aggregations(request=request)
+             supported_aggregations = aggregation_info["aggregations"]
+             for agg_name in aggregate_request.aggregations:
+                 if agg_name not in [x["name"] for x in supported_aggregations]:
+                     raise HTTPException(
+                         status_code=400,
+                         detail=f"Aggregation {agg_name} not supported at catalog level",
+                     )
+
+         if aggregate_request.filter_expr:
+             try:
+                 search = await self.database.apply_cql2_filter(
+                     search, aggregate_request.filter_expr
+                 )
+             except Exception as e:
+                 raise HTTPException(
+                     status_code=400, detail=f"Error with cql2 filter: {e}"
+                 )
+
+         centroid_geohash_grid_precision = self.extract_precision(
+             aggregate_request.centroid_geohash_grid_frequency_precision,
+             1,
+             self.MAX_GEOHASH_PRECISION,
+         )
+
+         centroid_geohex_grid_precision = self.extract_precision(
+             aggregate_request.centroid_geohex_grid_frequency_precision,
+             0,
+             self.MAX_GEOHEX_PRECISION,
+         )
+
+         centroid_geotile_grid_precision = self.extract_precision(
+             aggregate_request.centroid_geotile_grid_frequency_precision,
+             0,
+             self.MAX_GEOTILE_PRECISION,
+         )
+
+         geometry_geohash_grid_precision = self.extract_precision(
+             aggregate_request.geometry_geohash_grid_frequency_precision,
+             1,
+             self.MAX_GEOHASH_PRECISION,
+         )
+
+         geometry_geotile_grid_precision = self.extract_precision(
+             aggregate_request.geometry_geotile_grid_frequency_precision,
+             0,
+             self.MAX_GEOTILE_PRECISION,
+         )
+
+         datetime_frequency_interval = self.extract_date_histogram_interval(
+             aggregate_request.datetime_frequency_interval,
+         )
+
+         db_response = None  # stays None if the database raises a swallowed IndexError below
+         try:
+             db_response = await self.database.aggregate(
+                 collections,
+                 aggregate_request.aggregations,
+                 search,
+                 centroid_geohash_grid_precision,
+                 centroid_geohex_grid_precision,
+                 centroid_geotile_grid_precision,
+                 geometry_geohash_grid_precision,
+                 geometry_geotile_grid_precision,
+                 datetime_frequency_interval,
+             )
+         except Exception as error:
+             if not isinstance(error, IndexError):
+                 raise error
+         aggs: List[Aggregation] = []
+         if db_response:
+             result_aggs = db_response.get("aggregations", {})
+             for agg in {
+                 frozenset(item.items()): item
+                 for item in supported_aggregations + self.GEO_POINT_AGGREGATIONS
+             }.values():
+                 if agg["name"] in aggregate_request.aggregations:
+                     if agg["name"].endswith("_frequency"):
+                         aggs.append(
+                             frequency_agg(result_aggs, agg["name"], agg["data_type"])
+                         )
+                     else:
+                         aggs.append(
+                             metric_agg(result_aggs, agg["name"], agg["data_type"])
+                         )
+         links = [
+             {"rel": "root", "type": "application/json", "href": base_url},
+         ]
+
+         if collection_id:
+             collection_endpoint = urljoin(base_url, f"collections/{collection_id}")
+             links.extend(
+                 [
+                     {
+                         "rel": "collection",
+                         "type": "application/json",
+                         "href": collection_endpoint,
+                     },
+                     {
+                         "rel": "self",
+                         "type": "application/json",
+                         "href": urljoin(collection_endpoint, "aggregate"),
+                     },
+                 ]
+             )
+         else:
+             links.append(
+                 {
+                     "rel": "self",
+                     "type": "application/json",
+                     "href": urljoin(base_url, "aggregate"),
+                 }
+             )
+         results = AggregationCollection(
+             type="AggregationCollection", aggregations=aggs, links=links
+         )
+
+         return results
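
For reference, a POST body that exercises this endpoint might look like the following; the field names follow EsAggregationExtensionPostRequest as used above, and the values are purely illustrative:

    aggregate_body = {
        "collections": ["sentinel-2-l2a"],
        "datetime": "2024-01-01T00:00:00Z/2024-12-31T23:59:59Z",
        "aggregations": ["total_count", "datetime_frequency"],
        "datetime_frequency_interval": "month",
    }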
@@ -0,0 +1,60 @@
+ """Formatting functions for aggregation responses."""
+
+ from datetime import datetime
+ from typing import Any, Dict
+
+ from stac_fastapi.core.datetime_utils import datetime_to_str
+ from stac_fastapi.extensions.core.aggregation.types import Aggregation
+
+
+ def frequency_agg(es_aggs: Dict[str, Any], name: str, data_type: str) -> Aggregation:
+     """Format an aggregation for a frequency distribution aggregation.
+
+     Args:
+         es_aggs: The Elasticsearch/OpenSearch aggregation response
+         name: The name of the aggregation
+         data_type: The data type of the aggregation
+
+     Returns:
+         Aggregation: A formatted aggregation response
+     """
+     buckets = []
+     for bucket in es_aggs.get(name, {}).get("buckets", []):
+         bucket_data = {
+             "key": bucket.get("key_as_string") or bucket.get("key"),
+             "data_type": data_type,
+             "frequency": bucket.get("doc_count"),
+             "to": bucket.get("to"),
+             "from": bucket.get("from"),
+         }
+         buckets.append(bucket_data)
+     return Aggregation(
+         name=name,
+         data_type="frequency_distribution",
+         overflow=es_aggs.get(name, {}).get("sum_other_doc_count", 0),
+         buckets=buckets,
+     )
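
To make the mapping concrete, here is a hand-written sketch of an Elasticsearch/OpenSearch terms-aggregation response and what frequency_agg does with it:

    es_aggs = {
        "collection_frequency": {
            "sum_other_doc_count": 0,
            "buckets": [
                {"key": "sentinel-2-l2a", "doc_count": 120},
                {"key": "landsat-c2-l2", "doc_count": 45},
            ],
        }
    }
    agg = frequency_agg(es_aggs, "collection_frequency", "string")
    # agg.buckets -> key="sentinel-2-l2a"/frequency=120, key="landsat-c2-l2"/frequency=45; overflow=0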
+
+
+ def metric_agg(es_aggs: Dict[str, Any], name: str, data_type: str) -> Aggregation:
+     """Format an aggregation for a metric aggregation.
+
+     Args:
+         es_aggs: The Elasticsearch/OpenSearch aggregation response
+         name: The name of the aggregation
+         data_type: The data type of the aggregation
+
+     Returns:
+         Aggregation: A formatted aggregation response
+     """
+     value = es_aggs.get(name, {}).get("value_as_string") or es_aggs.get(name, {}).get(
+         "value"
+     )
+     # ES 7.x does not return datetimes with a 'value_as_string' field; convert from epoch milliseconds
+     if "datetime" in name and isinstance(value, float):
+         value = datetime_to_str(datetime.fromtimestamp(value / 1e3))
+     return Aggregation(
+         name=name,
+         data_type=data_type,
+         value=value,
+     )
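
And the datetime branch of metric_agg, sketched with a hypothetical epoch-millisecond value of the kind ES 7.x returns:

    es_aggs = {"datetime_max": {"value": 1700000000000.0}}  # epoch milliseconds, no "value_as_string"
    agg = metric_agg(es_aggs, "datetime_max", "datetime")
    # the float is converted via datetime.fromtimestamp(value / 1e3), then datetime_to_str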
@@ -0,0 +1,71 @@
+ """Shared database operations for stac-fastapi elasticsearch and opensearch backends.
+
+ This module provides shared database functionality used by both the Elasticsearch and OpenSearch
+ implementations of STAC FastAPI. It includes:
+
+ 1. Index management functions for creating and deleting indices
+ 2. Query building functions for constructing search queries
+ 3. Mapping functions for working with Elasticsearch/OpenSearch mappings
+ 4. Document operations for working with documents
+ 5. Utility functions for database operations
+ 6. Datetime utilities for query formatting
+
+ The database package is organized as follows:
+ - index.py: Index management functions
+ - query.py: Query building functions
+ - mapping.py: Mapping functions
+ - document.py: Document operations
+ - utils.py: Utility functions
+ - datetime.py: Datetime utilities for query formatting
+
+ When adding new functionality to this package, consider:
+ 1. Will this code be used by both Elasticsearch and OpenSearch implementations?
+ 2. Is the functionality stable and unlikely to diverge between implementations?
+ 3. Is the function well-documented with clear input/output contracts?
+
+ Function Naming Conventions:
+ - All shared functions should end with `_shared` to clearly indicate they're meant to be used by both implementations
+ - Function names should be descriptive and indicate their purpose
+ - Parameter names should be consistent across similar functions
+ """
+
+ # Re-export all functions for backward compatibility
+ from .datetime import return_date
+ from .document import mk_actions, mk_item_id
+ from .index import (
+     create_index_templates_shared,
+     delete_item_index_shared,
+     index_alias_by_collection_id,
+     index_by_collection_id,
+     indices,
+ )
+ from .mapping import get_queryables_mapping_shared
+ from .query import (
+     apply_free_text_filter_shared,
+     apply_intersects_filter_shared,
+     populate_sort_shared,
+ )
+ from .utils import get_bool_env, validate_refresh
+
+ __all__ = [
+     # Index operations
+     "create_index_templates_shared",
+     "delete_item_index_shared",
+     "index_alias_by_collection_id",
+     "index_by_collection_id",
+     "indices",
+     # Query operations
+     "apply_free_text_filter_shared",
+     "apply_intersects_filter_shared",
+     "populate_sort_shared",
+     # Mapping operations
+     "get_queryables_mapping_shared",
+     # Document operations
+     "mk_item_id",
+     "mk_actions",
+     # Utility functions
+     "validate_refresh",
+     "get_bool_env",
+     # Datetime utilities
+     "return_date",
+ ]
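
Because everything is re-exported here, consumers can import from the package root rather than the submodules; a sketch, assuming the wheel installs the package as stac_fastapi.sfeos_helpers:

    from stac_fastapi.sfeos_helpers.database import mk_item_id, return_date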
@@ -0,0 +1,60 @@
+ """Elasticsearch/OpenSearch-specific datetime utilities.
+
+ This module provides datetime utility functions specifically designed for
+ Elasticsearch and OpenSearch query formatting.
+ """
+
+ from datetime import datetime as datetime_type
+ from typing import Dict, Optional, Union
+
+ from stac_fastapi.types.rfc3339 import DateTimeType
+
+
+ def return_date(
+     interval: Optional[Union[DateTimeType, str]]
+ ) -> Dict[str, Optional[str]]:
+     """
+     Convert a date interval to an Elasticsearch/OpenSearch query format.
+
+     This function converts a date interval (which may be a datetime, a tuple of one or two datetimes,
+     a string representing a datetime or range, or None) into a dictionary for filtering
+     search results with Elasticsearch/OpenSearch.
+
+     This function ensures the output dictionary contains 'gte' and 'lte' keys,
+     even if they are set to None, to prevent KeyError in the consuming logic.
+
+     Args:
+         interval (Optional[Union[DateTimeType, str]]): The date interval, which might be a single datetime,
+             a tuple with one or two datetimes, a string, or None.
+
+     Returns:
+         dict: A dictionary representing the date interval for use in filtering search results,
+             always containing 'gte' and 'lte' keys.
+     """
+     result: Dict[str, Optional[str]] = {"gte": None, "lte": None}
+
+     if interval is None:
+         return result
+
+     if isinstance(interval, str):
+         if "/" in interval:
+             parts = interval.split("/")
+             result["gte"] = parts[0] if parts[0] != ".." else None
+             result["lte"] = parts[1] if len(parts) > 1 and parts[1] != ".." else None
+         else:
+             converted_time = interval if interval != ".." else None
+             result["gte"] = result["lte"] = converted_time
+         return result
+
+     if isinstance(interval, datetime_type):
+         datetime_iso = interval.isoformat()
+         result["gte"] = result["lte"] = datetime_iso
+     elif isinstance(interval, tuple):
+         start, end = interval
+         # Format with millisecond precision and a 'Z' suffix (datetimes are assumed to be UTC)
+         if start:
+             result["gte"] = start.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3] + "Z"
+         if end:
+             result["lte"] = end.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3] + "Z"
+
+     return result
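
A few illustrative inputs and the dictionaries they produce, derived directly from the branches above:

    from datetime import datetime, timezone

    return_date("2024-01-01T00:00:00Z/2024-06-30T23:59:59Z")
    # -> {"gte": "2024-01-01T00:00:00Z", "lte": "2024-06-30T23:59:59Z"}

    return_date("../2024-06-30T23:59:59Z")
    # -> {"gte": None, "lte": "2024-06-30T23:59:59Z"} (".." marks an open end)

    return_date((datetime(2024, 1, 1, tzinfo=timezone.utc), None))
    # -> {"gte": "2024-01-01T00:00:00.000Z", "lte": None}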