eodag 3.8.1__py3-none-any.whl → 3.9.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. eodag/api/core.py +1 -1
  2. eodag/api/product/drivers/generic.py +5 -1
  3. eodag/api/product/metadata_mapping.py +109 -8
  4. eodag/cli.py +36 -4
  5. eodag/config.py +5 -2
  6. eodag/plugins/apis/ecmwf.py +3 -1
  7. eodag/plugins/apis/usgs.py +2 -1
  8. eodag/plugins/authentication/aws_auth.py +228 -37
  9. eodag/plugins/authentication/base.py +12 -2
  10. eodag/plugins/authentication/oauth.py +5 -0
  11. eodag/plugins/base.py +3 -2
  12. eodag/plugins/download/aws.py +44 -285
  13. eodag/plugins/download/base.py +3 -2
  14. eodag/plugins/download/creodias_s3.py +1 -38
  15. eodag/plugins/download/http.py +111 -103
  16. eodag/plugins/download/s3rest.py +3 -1
  17. eodag/plugins/manager.py +2 -1
  18. eodag/plugins/search/__init__.py +2 -1
  19. eodag/plugins/search/base.py +2 -1
  20. eodag/plugins/search/build_search_result.py +2 -2
  21. eodag/plugins/search/creodias_s3.py +9 -1
  22. eodag/plugins/search/qssearch.py +3 -1
  23. eodag/resources/ext_product_types.json +1 -1
  24. eodag/resources/product_types.yml +220 -30
  25. eodag/resources/providers.yml +633 -88
  26. eodag/resources/stac_provider.yml +5 -2
  27. eodag/resources/user_conf_template.yml +0 -5
  28. eodag/rest/core.py +8 -0
  29. eodag/rest/errors.py +9 -0
  30. eodag/rest/server.py +8 -0
  31. eodag/rest/stac.py +8 -0
  32. eodag/rest/utils/__init__.py +2 -4
  33. eodag/rest/utils/rfc3339.py +1 -1
  34. eodag/utils/__init__.py +69 -54
  35. eodag/utils/dates.py +204 -0
  36. eodag/utils/s3.py +187 -168
  37. {eodag-3.8.1.dist-info → eodag-3.9.0.dist-info}/METADATA +4 -3
  38. {eodag-3.8.1.dist-info → eodag-3.9.0.dist-info}/RECORD +42 -42
  39. {eodag-3.8.1.dist-info → eodag-3.9.0.dist-info}/entry_points.txt +1 -1
  40. eodag/utils/rest.py +0 -100
  41. {eodag-3.8.1.dist-info → eodag-3.9.0.dist-info}/WHEEL +0 -0
  42. {eodag-3.8.1.dist-info → eodag-3.9.0.dist-info}/licenses/LICENSE +0 -0
  43. {eodag-3.8.1.dist-info → eodag-3.9.0.dist-info}/top_level.txt +0 -0
eodag/utils/s3.py CHANGED
@@ -22,16 +22,13 @@ import logging
  import os
  import uuid
  from dataclasses import dataclass, field
- from datetime import datetime
  from typing import TYPE_CHECKING
- from urllib.parse import urlparse
  from zipfile import ZIP_STORED, ZipFile

- import boto3
  import botocore
  import botocore.exceptions
  from concurrent.futures import FIRST_COMPLETED, ThreadPoolExecutor, wait
- from stream_zip import ZIP_AUTO, stream_zip
+ from zipstream import ZipStream

  from eodag.plugins.authentication.aws_auth import AwsAuth
  from eodag.utils import (
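Note on the import swap above: this release replaces ``stream-zip`` (which consumes ``(name, modified_at, mode, method, chunks)`` tuples, as the deleted ``zip_stream()`` generator below shows) with ``zipstream-ng``'s incremental ``ZipStream`` object. A minimal sketch of the new library's API, assuming only that ``zipstream-ng`` is installed (the file name and payload are illustrative):

.. code-block:: python

    from zipstream import ZipStream

    # sized=True disallows compression but lets len(zs) report the final
    # archive size before any bytes are generated (usable as Content-Length).
    zs = ZipStream(sized=True)
    zs.add(b"hello world", "docs/hello.txt")

    total_size = len(zs)
    with open("archive.zip", "wb") as out:
        for chunk in zs:  # the archive is produced incrementally
            out.write(chunk)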
@@ -43,9 +40,7 @@ from eodag.utils import (
  )
  from eodag.utils.exceptions import (
      AuthenticationError,
-     DownloadError,
      InvalidDataError,
-     MisconfiguredError,
      NotAvailableError,
  )

@@ -54,7 +49,6 @@ if TYPE_CHECKING:
      from zipfile import ZipInfo

      from mypy_boto3_s3.client import S3Client
-     from stream_zip import Method

      from eodag.api.product import EOProduct  # type: ignore

@@ -197,75 +191,88 @@ def _chunks_from_s3_objects(
      executor: ThreadPoolExecutor,
  ) -> Iterator[tuple[int, Iterator[bytes]]]:
      """Download chunks from S3 objects in parallel, respecting byte ranges and file order."""
+     # Prepare ranges and futures per file
      for f_info in files_info:
          ranges = _compute_file_ranges(f_info, byte_range, range_size)

          if not ranges:
-             logger.debug("Skipping %s: no ranges to fetch", f_info.key)
+             # Mark as inactive (no futures)
+             f_info.futures = {}
+             f_info.buffers = {}
+             f_info.next_yield = 0
              continue

          f_info.buffers = {}
          f_info.next_yield = 0

          futures = {}
-         for start, length in ranges:
+         # start,end are absolute offsets in the S3 object (data_start_offset already applied)
+         for start, end in ranges:
              future = executor.submit(
                  fetch_range,
                  f_info.bucket_name,
                  f_info.key,
                  start,
-                 length,
+                 end,
                  s3_client,
              )
-             futures[future] = start
+             # Track both start and end so we can compute the yielded length precisely
+             futures[future] = (start, end)

          f_info.futures = futures

+     # Keep only files that actually have something to download
+     active_indices = [i for i, fi in enumerate(files_info) if fi.futures]
+
      # Combine all futures to wait on globally
      all_futures = {
-         fut: (f_info, start)
-         for f_info in files_info
-         for fut, start in f_info.futures.items()
+         fut: (f_info, start, end)
+         for f_info in (files_info[i] for i in active_indices)
+         for fut, (start, end) in f_info.futures.items()
      }

-     current_file_index = 0
-
-     # Yield chunks per file (one at a time)
-     while current_file_index < len(files_info):
-         current_info = files_info[current_file_index]
-
-         def chunks_generator() -> Iterator[bytes]:
-             """yield chunks of data for the current file."""
-             nonlocal current_file_index, all_futures
-             while current_info.next_yield < current_info.size:
-                 # Wait for any futures to complete
-                 done, _ = wait(all_futures.keys(), return_when=FIRST_COMPLETED)
-
-                 for fut in done:
-                     f_info, start = all_futures.pop(fut)
-                     data = fut.result()
-                     f_info.buffers[start] = data
-
-                 # Yield chunks as they are available
-                 next_start = current_info.next_yield
-                 while next_start in current_info.buffers:
-                     chunk = current_info.buffers.pop(next_start)
-                     if not isinstance(chunk, bytes):
-                         raise InvalidDataError(
-                             f"Expected bytes, got {type(chunk).__name__} in stream chunks: {chunk}"
-                         )
-                     yield chunk
-
-                     next_start += range_size
-                 current_info.next_yield = next_start
-
-                 # If done with this file, stop yielding chunks for this file
-                 if current_info.next_yield >= current_info.size:
-                     break
-
-         yield current_file_index, chunks_generator()
-
-         current_file_index += 1
+     def make_chunks_generator(target_info: S3FileInfo) -> Iterator[bytes]:
+         """Create a generator bound to a specific file info (no late-binding bug)."""
+         info = target_info  # bind
+         nonlocal all_futures
+         while info.next_yield < info.size:
+             # First, try to flush anything already buffered for this file
+             next_start = info.next_yield
+             flushed = False
+             while next_start in info.buffers:
+                 chunk = info.buffers.pop(next_start)
+                 if not isinstance(chunk, bytes):
+                     raise InvalidDataError(
+                         f"Expected bytes, got {type(chunk).__name__} in stream chunks: {chunk}"
+                     )
+                 yield chunk
+                 next_start += len(chunk)
+                 info.next_yield = next_start
+                 flushed = True
+
+             if info.next_yield >= info.size:
+                 break
+
+             # If we flushed something, loop back to try again before waiting
+             if flushed:
+                 continue
+
+             # Nothing to flush for this file: wait for more futures to complete globally
+             if not all_futures:
+                 # No more incoming data anywhere; stop to avoid waiting on an empty set
+                 break
+
+             done, _ = wait(all_futures.keys(), return_when=FIRST_COMPLETED)
+             for fut in done:
+                 f_info, start, end = all_futures.pop(fut)
+                 data = fut.result()
+                 # Store buffer with a key relative to the start of the file data
+                 rel_start = start - f_info.data_start_offset
+                 f_info.buffers[rel_start] = data

+     # Yield per-file generators with their original indices
+     for idx in active_indices:
+         yield idx, make_chunks_generator(files_info[idx])


  def _build_stream_response(
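The rewrite above replaces a closure defined inside the ``while`` loop with a ``make_chunks_generator`` factory. The old ``chunks_generator`` closed over ``current_info`` and mutated ``current_file_index`` via ``nonlocal``, so a generator consumed after the loop had advanced could read another file's state — the classic Python late-binding pitfall, illustrated below with made-up names:

.. code-block:: python

    def late_bound():
        gens = []
        for item in ["a", "b", "c"]:
            def gen():
                yield item  # `item` is looked up when the generator runs
            gens.append(gen())
        return [next(g) for g in gens]

    def early_bound():
        def make_gen(bound):  # same factory trick as make_chunks_generator
            def gen():
                yield bound  # bound when the factory is called
            return gen()
        gens = [make_gen(item) for item in ["a", "b", "c"]]
        return [next(g) for g in gens]

    assert late_bound() == ["c", "c", "c"]
    assert early_bound() == ["a", "b", "c"]

The rewrite also advances ``next_yield`` by ``len(chunk)`` rather than ``range_size``, which stays correct when a file's final range is shorter than a full chunk, and it keys buffers relative to ``data_start_offset`` so files extracted from within ZIP archives line up with their logical offsets.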
@@ -300,9 +307,6 @@ def _build_stream_response(
      :param executor: Executor used for concurrent streaming and cleanup.
      :return: Streaming HTTP response with appropriate content, headers, and media type.
      """
-     headers = {
-         "Accept-Ranges": "bytes",
-     }

      def _wrap_generator_with_cleanup(
          generator: Iterable[bytes], executor: ThreadPoolExecutor
@@ -315,41 +319,32 @@
      def _build_response(
          content_gen: Iterable[bytes],
          media_type: str,
-         extra_headers: dict[str, str] = {},
+         filename: Optional[str] = None,
+         size: Optional[int] = None,
      ) -> StreamResponse:
          return StreamResponse(
              content=_wrap_generator_with_cleanup(content_gen, executor),
              media_type=media_type,
-             headers={**headers, **extra_headers},
+             headers={"Accept-Ranges": "bytes"},
+             filename=filename,
+             size=size,
          )

      zip_response = (len(files_info) > 1 and compress == "auto") or compress == "zip"

      if zip_response:
-         modified_at = datetime.now()
-         perms = 0o600
-         total_file_size = sum(f.size for f in files_info)
-
-         def zip_stream() -> Iterator[
-             tuple[str, datetime, int, Method, Iterable[bytes]]
-         ]:
-             for index, chunks_generator in files_iterator:
-                 yield (
-                     files_info[index].rel_path or files_info[index].key,
-                     modified_at,
-                     perms,
-                     ZIP_AUTO(total_file_size, level=0),
-                     chunks_generator,
-                 )
+         zs = ZipStream(sized=True)
+         for index, chunks_generator in files_iterator:
+             file_info = files_info[index]
+             file_path = file_info.rel_path or file_info.key
+             zs.add(chunks_generator, file_path, size=file_info.size)

          return _build_response(
-             content_gen=stream_zip(zip_stream()),
+             content_gen=zs,
              media_type="application/zip",
-             extra_headers={
-                 "content-disposition": f'attachment; filename="{zip_filename}.zip"'
-             },
+             filename=f"{zip_filename}.zip",
+             size=len(zs),
          )
-
      elif len(files_info) > 1:
          boundary = uuid.uuid4().hex

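``_build_response`` now passes ``filename`` and ``size`` through to ``StreamResponse`` instead of each branch hand-rolling a ``content-disposition`` header, with ``len(zs)`` on the sized ``ZipStream`` supplying the archive size up front. A hypothetical sketch of how a response object can derive those headers (the real ``StreamResponse`` lives in ``eodag.utils`` and is not shown in this diff):

.. code-block:: python

    from dataclasses import dataclass, field
    from typing import Iterable, Optional

    @dataclass
    class SketchStreamResponse:  # illustrative stand-in for StreamResponse
        content: Iterable[bytes]
        media_type: str = "application/octet-stream"
        headers: dict[str, str] = field(default_factory=dict)
        filename: Optional[str] = None
        size: Optional[int] = None

        def __post_init__(self) -> None:
            # Centralized header construction keeps the zip/multipart/raw
            # branches consistent.
            if self.filename is not None:
                self.headers.setdefault(
                    "content-disposition", f'attachment; filename="{self.filename}"'
                )
            if self.size is not None:
                self.headers.setdefault("Content-Length", str(self.size))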
@@ -373,7 +368,6 @@
              content_gen=multipart_stream(),
              media_type=f"multipart/mixed; boundary={boundary}",
          )
-
      else:
          index, chunks_generator = next(files_iterator)
          first_chunk = next(chunks_generator)
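When several files are requested with ``compress="raw"``, the body is framed as ``multipart/mixed`` with a random boundary. ``multipart_stream`` itself is unchanged and not shown in this diff; the following is an illustrative framing of such a body, not eodag's actual code:

.. code-block:: python

    import uuid
    from typing import Iterable, Iterator

    def multipart_mixed(
        parts: list[tuple[str, Iterable[bytes]]],
    ) -> tuple[str, Iterator[bytes]]:
        """Frame (media_type, chunks) pairs as a multipart/mixed body."""
        boundary = uuid.uuid4().hex

        def stream() -> Iterator[bytes]:
            for media_type, chunks in parts:
                yield f"--{boundary}\r\nContent-Type: {media_type}\r\n\r\n".encode()
                yield from chunks
                yield b"\r\n"
            yield f"--{boundary}--\r\n".encode()  # closing delimiter

        return f"multipart/mixed; boundary={boundary}", stream()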
@@ -386,7 +380,7 @@
          return _build_response(
              content_gen=single_file_stream(),
              media_type=files_info[index].data_type,
-             extra_headers={"content-disposition": f'attachment; filename="{filename}"'},
+             filename=filename,
          )


@@ -400,78 +394,120 @@ def stream_download_from_s3(
      max_workers: int = 8,
  ) -> StreamResponse:
      """
-     Stream data from one or more S3 objects in chunks, with support for global byte ranges
-     and partial file extraction from ZIP archives.
-
-     This function downloads product data from S3 using concurrent range requests across one or
-     multiple files. It divides the requested data into chunks (default: 8 MiB) and issues
-     parallel HTTP range requests to optimize download throughput. This is particularly useful
-     for large files or datasets stored across multiple S3 objects.
-
-     If the S3 key refers to a path inside a ``.zip`` file (denoted by ``.zip!<internal_path>``),
-     the function extracts the specified file from the archive only if it is stored uncompressed
-     (ZIP method = STORE). Compressed formats (like DEFLATE) are not supported for partial ZIP extraction.
-
-     The function supports global byte range filtering via the ``byte_range`` parameter, which allows
-     requesting only a specific portion of the logical file stream across all provided objects.
-
-     Downloads are performed concurrently using a thread pool and HTTP range requests. Each chunk is downloaded
-     as a separate HTTP request and yielded in file order.
-
-     The ``compress`` parameter determines the output format:
-
-     - ``zip``: Always produce a ZIP archive containing all files.
-     - ``raw``: Stream files directly without wrapping, either as a single file or multipart response.
-     - ``auto``: Automatically select the format:
-         - raw stream if only a single file is requested
-         - ZIP archive if multiple files are requested
-
-     :param s3_client: A configured S3 client capable of making range requests.
-     :param files_info: List of S3FileInfo objects representing the files to download.
-     :param byte_range: Tuple (start, end) defining the inclusive global byte range to download across all objects.
-         Either value can be None to indicate open-ended range.
-     :param compress: Determines the output format of the streamed response.
-     :param zip_filename: The base filename to use when producing a ZIP archive (without extension).
-     :param range_size: The size in bytes of each download chunk. Defaults to 8 MiB.
-     :param max_workers: The maximum number of concurrent download tasks. Controls the size of the thread pool.
-     :return: Streaming HTTP response with content according to the requested format.
-     :raises DownloadError: If any error occurs during streaming from S3, including missing files or
-         unsupported ZIP compression.
+     Stream data from one or more S3 objects in chunks, with support for global byte ranges.
+
+     This function provides efficient streaming download of S3 objects with support for:
+
+     * Single file streaming with direct MIME type detection
+     * Multiple file streaming as ZIP archives
+     * Byte range requests for partial content
+     * Files within ZIP archives (using ``.zip!`` notation)
+     * Concurrent chunk downloading for improved performance
+     * Memory-efficient streaming without loading entire files
+
+     The response format depends on the compress parameter and number of files:
+
+     * Single file + ``compress="raw"`` or ``"auto"``: streams file directly with detected MIME type
+     * Multiple files + ``compress="zip"`` or ``"auto"``: creates ZIP archive containing all files
+     * ``compress="zip"``: always creates ZIP archive regardless of file count
+
+     For files stored within ZIP archives, use the ``.zip!`` notation in the ``S3FileInfo.key``:
+     ``"path/to/archive.zip!internal/file.txt"``
+
+     :param s3_client: Boto3 S3 client instance for making requests
+     :param files_info: List of S3FileInfo objects describing files to download.
+         Each object must contain at minimum: ``bucket_name``, ``key``, and ``size``.
+         Optional fields include: ``data_type``, ``rel_path``, ``zip_filepath``.
+     :param byte_range: Global byte range to download as ``(start, end)`` tuple.
+         ``None`` values indicate open-ended ranges.
+         Applied across the logical concatenation of all files.
+     :param compress: Output format control:
+
+         * ``"zip"``: Always create ZIP archive
+         * ``"raw"``: Stream files directly (single) or as multipart (multiple)
+         * ``"auto"``: ZIP for multiple files, raw for single file
+
+     :param zip_filename: Base filename for ZIP archives (without ``.zip`` extension).
+         Only used when creating ZIP archives.
+     :param range_size: Size of each download chunk in bytes. Larger chunks reduce
+         request overhead but use more memory. Default: 8MB.
+     :param max_workers: Maximum number of concurrent download threads.
+         Higher values improve throughput for multiple ranges.
+     :return: StreamResponse object containing:
+
+         * ``content``: Iterator of bytes for the streaming response
+         * ``media_type``: MIME type (``"application/zip"`` for archives, detected type for single files)
+         * ``headers``: HTTP headers including Content-Disposition for downloads
+
+     :rtype: StreamResponse
+     :raises InvalidDataError: If ZIP file structures are malformed
+     :raises NotAvailableError: If S3 objects cannot be accessed
+     :raises AuthenticationError: If S3 credentials are invalid
+     :raises NotImplementedError: If compressed files within ZIP archives are encountered
+
+     Example usage:
+
+     .. code-block:: python
+
+         import boto3
+         from eodag.utils.s3 import stream_download_from_s3, S3FileInfo
+
+         # Create S3 client
+         s3_client = boto3.client('s3')
+
+         # Single file download
+         files = [S3FileInfo(bucket_name="bucket", key="file.txt", size=1024)]
+         response = stream_download_from_s3(s3_client, files)
+
+         # Multiple files as ZIP archive
+         files = [
+             S3FileInfo(bucket_name="bucket", key="file1.txt", size=1024),
+             S3FileInfo(bucket_name="bucket", key="file2.txt", size=2048)
+         ]
+         response = stream_download_from_s3(s3_client, files, compress="zip")
+
+         # File within ZIP archive
+         files = [S3FileInfo(
+             bucket_name="bucket",
+             key="archive.zip!internal.txt",
+             size=512
+         )]
+         response = stream_download_from_s3(s3_client, files)
+
+         # Process streaming response
+         for chunk in response.content:
+             # Handle chunk data
+             pass
      """
-     offset = 0

      executor = ThreadPoolExecutor(max_workers=max_workers)
-     try:
-         for f_info in files_info:
-             # Check if file is inside a ZIP
-             if ".zip!" in f_info.key:
-                 future = executor.submit(_prepare_file_in_zip, f_info, s3_client)
-                 f_info.futures[future] = 0
-
-         for f_info in files_info:
-             for future in f_info.futures:
-                 future.result()
-             f_info.file_start_offset = offset
-             offset += f_info.size
-
-             if not f_info.data_type or f_info.data_type == MIME_OCTET_STREAM:
-                 guessed = guess_file_type(f_info.key)
-                 f_info.data_type = guessed or MIME_OCTET_STREAM
-
-         chunks_tuple = _chunks_from_s3_objects(
-             s3_client,
-             files_info,
-             byte_range,
-             range_size,
-             executor,
-         )

-         return _build_stream_response(
-             zip_filename, files_info, chunks_tuple, compress, executor
-         )
-     except Exception as e:
-         executor.shutdown(wait=True)
-         raise DownloadError(str(e)) from e
+     # Prepare all files
+     offset = 0
+     for f_info in files_info:
+         if ".zip!" in f_info.key:
+             _prepare_file_in_zip(f_info, s3_client)
+
+         f_info.file_start_offset = offset
+         offset += f_info.size
+
+         if not f_info.data_type or f_info.data_type == MIME_OCTET_STREAM:
+             guessed = guess_file_type(f_info.key)
+             f_info.data_type = guessed or MIME_OCTET_STREAM
+
+     # Create the files iterator using the original approach
+     files_iterator = _chunks_from_s3_objects(
+         s3_client, files_info, byte_range, range_size, executor
+     )
+
+     # Use the existing _build_stream_response function with the additional parameters
+     return _build_stream_response(
+         zip_filename=zip_filename,
+         files_info=files_info,
+         files_iterator=files_iterator,
+         compress=compress,
+         executor=executor,
+     )


  def update_assets_from_s3(
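The docstring above notes that ``byte_range`` is applied "across the logical concatenation of all files", which is what ``file_start_offset`` enables: each file's local range is the global range clamped to the file and shifted by its offset. A hypothetical helper showing the arithmetic (eodag's real logic lives in ``_compute_file_ranges``, which is not part of this diff):

.. code-block:: python

    from typing import Optional

    def global_to_file_range(
        file_start_offset: int,
        file_size: int,
        byte_range: tuple[Optional[int], Optional[int]],
    ) -> Optional[tuple[int, int]]:
        """Return the inclusive (start, end) range inside one file, or None."""
        g_start = byte_range[0] if byte_range[0] is not None else 0
        g_end = byte_range[1] if byte_range[1] is not None else float("inf")
        f_start = file_start_offset
        f_end = file_start_offset + file_size - 1  # inclusive
        if g_end < f_start or g_start > f_end:
            return None  # file lies entirely outside the requested range
        # Clamp to the file, then convert to file-local offsets
        return max(g_start, f_start) - f_start, int(min(g_end, f_end)) - f_start

    # Two files of 1024 and 2048 bytes; a global range straddling both:
    assert global_to_file_range(0, 1024, (1000, 1500)) == (1000, 1023)
    assert global_to_file_range(1024, 2048, (1000, 1500)) == (0, 476)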
@@ -491,7 +527,6 @@ def update_assets_from_s3(
      :param content_url: s3 URL pointing to the content that must be listed (defaults to
          ``product.remote_location`` if empty)
      """
-     required_creds = ["aws_access_key_id", "aws_secret_access_key"]

      if content_url is None:
          content_url = product.remote_location
@@ -503,35 +538,21 @@
          return None

      try:
-         auth_dict = auth.authenticate()
-
-         if not all(x in auth_dict for x in required_creds):
-             raise MisconfiguredError(
-                 f"Incomplete credentials for {product.provider}, missing "
-                 f"{[x for x in required_creds if x not in auth_dict]}"
-             )
-         if not getattr(auth, "s3_client", None):
-             auth.s3_client = boto3.client(
-                 service_name="s3",
-                 endpoint_url=s3_endpoint,
-                 aws_access_key_id=auth_dict.get("aws_access_key_id"),
-                 aws_secret_access_key=auth_dict.get("aws_secret_access_key"),
-                 aws_session_token=auth_dict.get("aws_session_token"),
-             )

          logger.debug("Listing assets in %s", prefix)
+         s3_client = auth.get_s3_client()

          if prefix.endswith(".zip"):
              # List prefix zip content
              assets_urls = [
                  f"zip+s3://{bucket}/{prefix}!{f.filename}"
-                 for f in list_files_in_s3_zipped_object(bucket, prefix, auth.s3_client)
+                 for f in list_files_in_s3_zipped_object(bucket, prefix, s3_client)
              ]
          else:
              # List files in prefix
              assets_urls = [
                  f"s3://{bucket}/{obj['Key']}"
-                 for obj in auth.s3_client.list_objects(
+                 for obj in s3_client.list_objects(
                      Bucket=bucket, Prefix=prefix, MaxKeys=300
                  ).get("Contents", [])
              ]
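The credential validation and boto3 client construction deleted above now sit behind ``auth.get_s3_client()``, consistent with the ``aws_auth.py`` rework in this release (+228/-37). A sketch of what such a helper plausibly encapsulates, reconstructed from the removed caller-side code — an assumption about its shape, not the actual implementation in ``eodag/plugins/authentication/aws_auth.py``:

.. code-block:: python

    from typing import Optional

    import boto3

    class SketchAwsAuth:  # hypothetical stand-in for AwsAuth
        def __init__(self, credentials: dict, endpoint_url: Optional[str] = None):
            self.credentials = credentials
            self.endpoint_url = endpoint_url
            self._s3_client = None

        def get_s3_client(self):
            """Validate credentials once and cache a boto3 S3 client."""
            if self._s3_client is None:
                required = ("aws_access_key_id", "aws_secret_access_key")
                missing = [k for k in required if k not in self.credentials]
                if missing:
                    raise ValueError(f"Incomplete S3 credentials, missing {missing}")
                self._s3_client = boto3.client(
                    service_name="s3",
                    endpoint_url=self.endpoint_url,
                    aws_access_key_id=self.credentials["aws_access_key_id"],
                    aws_secret_access_key=self.credentials["aws_secret_access_key"],
                    aws_session_token=self.credentials.get("aws_session_token"),
                )
            return self._s3_client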
@@ -541,12 +562,10 @@
          key, roles = product.driver.guess_asset_key_and_roles(
              out_of_zip_url, product
          )
-         parsed_url = urlparse(out_of_zip_url)
-         title = os.path.basename(parsed_url.path)

          if key and key not in product.assets:
              product.assets[key] = {
-                 "title": title,
+                 "title": key,  # Normalize title with key
                  "roles": roles,
                  "href": asset_url,
              }
{eodag-3.8.1.dist-info → eodag-3.9.0.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: eodag
- Version: 3.8.1
+ Version: 3.9.0
  Summary: Earth Observation Data Access Gateway
  Home-page: https://github.com/CS-SI/eodag
  Author: CS GROUP - France
@@ -50,10 +50,10 @@ Requires-Dist: python-dateutil
  Requires-Dist: PyYAML
  Requires-Dist: requests
  Requires-Dist: shapely>=2.0.6
- Requires-Dist: stream-zip
  Requires-Dist: tqdm
  Requires-Dist: typing_extensions>=4.8.0
  Requires-Dist: urllib3
+ Requires-Dist: zipstream-ng
  Provides-Extra: all
  Requires-Dist: eodag[all-providers,csw,server,tutorials]; extra == "all"
  Provides-Extra: all-providers
@@ -124,6 +124,7 @@ Requires-Dist: sphinx; extra == "docs"
  Requires-Dist: sphinx-autodoc-typehints; extra == "docs"
  Requires-Dist: sphinx-book-theme>=1.0.0; extra == "docs"
  Requires-Dist: sphinx-copybutton; extra == "docs"
+ Requires-Dist: sphinx-design; extra == "docs"
  Requires-Dist: sphinx-tabs; extra == "docs"
  Requires-Dist: sphinxcontrib-programoutput; extra == "docs"
  Requires-Dist: sphinxemoji; extra == "docs"
@@ -316,7 +317,7 @@ An eodag instance can be exposed through a STAC compliant REST api from the comm

  .. code-block:: bash

-     docker run -p 5000:5000 --rm csspace/eodag-server:3.8.1
+     docker run -p 5000:5000 --rm csspace/eodag-server:3.9.0

  You can also browse over your STAC API server using `STAC Browser <https://github.com/radiantearth/stac-browser>`_.
  Simply run: