eotdl 2025.5.26.post4__py3-none-any.whl → 2025.6.27__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
eotdl/__init__.py CHANGED
@@ -1 +1 @@
- __version__ = "2025.05.26-4"
+ __version__ = "2025.06.27"
eotdl/access/__init__.py CHANGED
@@ -15,3 +15,4 @@ from .sentinelhub.parameters import (
      OUTPUT_FORMAT,
  )
  from .sentinelhub.evalscripts import EvalScripts
+ from .matches import find_sentinel_matches_by_bb, find_sentinel_matches_by_centroid
eotdl/access/matches.py ADDED
@@ -0,0 +1,19 @@
+ from datetime import datetime, timedelta
+
+ from ..access import search_sentinel_imagery
+ from ..tools import bbox_from_centroid
+
+ def find_sentinel_matches_by_centroid(centroid, date, time_buffer, width, height, collection_id="sentinel-2-l2a"):
+     dates = [(date - timedelta(days=time_buffer/2)).strftime('%Y-%m-%d'),
+              (date + timedelta(days=time_buffer/2)).strftime('%Y-%m-%d')]
+     custom_bbox = bbox_from_centroid(x=centroid.y, y=centroid.x, pixel_size=10, width=width, height=height)
+     sentinel_matches = list(search_sentinel_imagery(dates, custom_bbox, collection_id))
+     return sentinel_matches
+
+ def find_sentinel_matches_by_bb(bb, date, time_buffer, collection_id="sentinel-2-l2a"):
+     if isinstance(date, str):
+         date = datetime.strptime(date, '%Y-%m-%d')
+     dates = [(date - timedelta(days=time_buffer/2)).strftime('%Y-%m-%d'),
+              (date + timedelta(days=time_buffer/2)).strftime('%Y-%m-%d')]
+     sentinel_matches = list(search_sentinel_imagery(dates, bb, collection_id))
+     return sentinel_matches
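For orientation, a minimal usage sketch of the new matching helpers. The shapely Point, coordinates, dates and buffer below are illustrative and not taken from the package; only the imports follow the __init__.py change above.

from datetime import datetime
from shapely.geometry import Point
from eotdl.access import find_sentinel_matches_by_bb, find_sentinel_matches_by_centroid

# hypothetical point of interest with a +/- 5 day window around a date
centroid = Point(2.17, 41.38)
matches = find_sentinel_matches_by_centroid(centroid, datetime(2024, 6, 1), time_buffer=10, width=64, height=64)

# or search an explicit bounding box; string dates are parsed inside this helper
matches = find_sentinel_matches_by_bb([2.1, 41.3, 2.3, 41.5], "2024-06-01", time_buffer=10)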
eotdl/commands/datasets.py CHANGED
@@ -39,6 +39,12 @@ def ingest(
          '--private',
          '-pr',
          help="Make dataset private"
+     ),
+     ignore_stac: bool = typer.Option(
+         False,
+         "--ignore-stac",
+         "-is",
+         help="Ignore STAC catalog.json (if found) for geneating metadata."
      )
  ):
      """
@@ -67,7 +73,7 @@ def ingest(
      $ eotdl dataset ingest --path /path/to/folder-with-dataset --verbose True
      """
      try:
-         ingest_dataset(path, verbose, typer.echo, foce_metadata_update, sync_metadata, private)
+         ingest_dataset(path, verbose, typer.echo, foce_metadata_update, sync_metadata, private, ignore_stac)
      except Exception as e:
          typer.echo(e)
 
eotdl/datasets/ingest.py CHANGED
@@ -33,14 +33,17 @@ def ingest_dataset(
      force_metadata_update=False,
      sync_metadata=False,
      private=False,
+     ignore_stac=False,
  ):
      if private: print("Ingesting private dataset")
      path = Path(path)
      if not path.is_dir():
          raise Exception("Path must be a folder")
-     if "catalog.json" in [f.name for f in path.iterdir()]:
+     if "catalog.json" in [f.name for f in path.iterdir()] and not ignore_stac:
+         print("Ingesting STAC catalog")
          prep_ingest_stac(path, logger)
      else:
+         print("Ingesting folder")
          prep_ingest_folder(path, verbose, logger, force_metadata_update, sync_metadata)
      return ingest(path, DatasetsAPIRepo(), retrieve_dataset, 'datasets', private)
 
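A hedged sketch of calling the updated ingestion from Python, assuming ingest_dataset is importable from eotdl.datasets as in previous releases and that the keyword names match the positional CLI call above; the folder path is the illustrative one from the CLI docstring.

from eotdl.datasets import ingest_dataset

# skip the STAC catalog.json found in the folder and ingest it as a plain folder instead
ingest_dataset("/path/to/folder-with-dataset", verbose=True, ignore_stac=True)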
eotdl/fe/ingest.py CHANGED
@@ -3,7 +3,7 @@ from pathlib import Path
  from ..repos import FEAPIRepo
  from ..files.ingest import prep_ingest_folder, ingest
 
- def retrieve_pipeline(metadata, user):
+ def retrieve_pipeline(metadata, user, private=False):
      repo = FEAPIRepo()
      data, error = repo.retrieve_pipeline(metadata.name)
      if data and data["uid"] != user["uid"]:
eotdl/fe/openeo/__init__.py CHANGED
@@ -1,2 +1,2 @@
- from .basic_point_extraction import point_extraction
- from .advanced_patch_extraction import patch_extraction
+ from .basic_point_extraction import eurocrops_point_extraction
+ # from .advanced_patch_extraction import patch_extraction
eotdl/fe/openeo/basic_point_extraction.py CHANGED
@@ -2,19 +2,20 @@ from .dataframe_utils import *
  import openeo
  from openeo.extra.job_management import MultiBackendJobManager, CsvJobDatabase
 
- def start_job(row: pd.Series, connection: openeo.Connection, s1_weekly_statistics_url: str, s2_weekly_statistics_url: str, **kwargs) -> openeo.BatchJob:
+ # pass arguments in the row
+ def start_job(row: pd.Series, connection: openeo.Connection, **kwargs) -> openeo.BatchJob:
      temporal_extent = row["temporal_extent"]
      # set up load url in order to allow non-latlon feature collections for spatial filtering
      geometry = row["geometry"]
      #run the s1 and s2 udp
      s1 = connection.datacube_from_process(
-         "s1_weekly_statistics",
-         namespace=s1_weekly_statistics_url,
+         "s1_weekly_statistics", # depends on the json, so must be also a parameter
+         namespace=row["s1_weekly_statistics_url"],
          temporal_extent=temporal_extent,
      )
      s2 = connection.datacube_from_process(
          "s2_weekly_statistics",
-         namespace=s2_weekly_statistics_url,
+         namespace=row["s2_weekly_statistics_url"],
          temporal_extent=temporal_extent,
      )
      #merge both cubes and filter across the feature collection
@@ -26,10 +27,8 @@ def start_job(row: pd.Series, connection: openeo.Connection, s1_weekly_statistic
      )
      return job
 
- def point_extraction(
+ def eurocrops_point_extraction(
      gdf,
-     s1_weekly_statistics_url,
-     s2_weekly_statistics_url,
      start_date,
      nb_months,
      extra_cols=[],
@@ -83,4 +82,4 @@ def point_extraction(
      if not job_db.exists():
          df = manager._normalize_df(job_df)
          job_db.persist(df)
-     manager.run_jobs(start_job=start_job, job_db=job_db, s1_weekly_statistics_url=s1_weekly_statistics_url, s2_weekly_statistics_url=s2_weekly_statistics_url)
+     manager.run_jobs(start_job=start_job, job_db=job_db)
eotdl/fe/openeo/dataframe_utils.py CHANGED
@@ -129,6 +129,8 @@ def process_geodataframe(
  "geometry": row.geometry,
  "crs": geodataframe.crs.to_string(),
  "temporal_extent": temporal_extent,
+ "s1_weekly_statistics_url": row.get("s1_weekly_statistics_url"),
+ "s2_weekly_statistics_url": row.get("s2_weekly_statistics_url"),
  **{col: row[col] for col in extra_cols}
  }
 
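Taken together with the basic_point_extraction.py change above, the UDP namespace URLs now travel with each row instead of being passed to run_jobs. A hedged sketch of how a caller might attach them to the input GeoDataFrame before calling eurocrops_point_extraction; the column names match the diff, while the input file, URLs and keyword values are illustrative.

import geopandas as gpd

gdf = gpd.read_file("parcels.geojson")  # hypothetical input features
gdf["s1_weekly_statistics_url"] = "https://example.com/udp/s1_weekly_statistics.json"
gdf["s2_weekly_statistics_url"] = "https://example.com/udp/s2_weekly_statistics.json"
# eurocrops_point_extraction(gdf, start_date="2021-01-01", nb_months=12) would then read the URLs per row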
eotdl/files/__init__.py CHANGED
@@ -1 +1,2 @@
- from .get_url import get_file_url
+ from .get_url import get_file_url
+ from .get_file import get_file_content_url
eotdl/files/get_file.py ADDED
@@ -0,0 +1,18 @@
+ from ..auth import with_auth
+ from ..repos import FilesAPIRepo
+ from ..datasets.retrieve import retrieve_dataset
+ from ..models.retrieve import retrieve_model
+ from ..fe.retrieve import retrieve_pipeline
+
+ @with_auth
+ def get_file_content_url(filename, dataset_or_model_name, endpoint, user):
+     if endpoint == "datasets":
+         dataset_or_model_id = retrieve_dataset(dataset_or_model_name)['id']
+     elif endpoint == "models":
+         dataset_or_model_id = retrieve_model(dataset_or_model_name)['id']
+     elif endpoint == "pipelines":
+         dataset_or_model_id = retrieve_pipeline(dataset_or_model_name)['id']
+     else:
+         raise Exception("Invalid endpoint (datasets, models or pipelines)")
+     repo = FilesAPIRepo()
+     return repo.generate_file_content_url(filename, dataset_or_model_id, user, endpoint)
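A hedged usage sketch of the new helper: get_file_content_url is re-exported from eotdl.files per the __init__.py change above, the @with_auth decorator is assumed to supply the user argument, and the dataset and file names are illustrative.

from eotdl.files import get_file_content_url

# returns a URL of the form <api>/datasets/<dataset_id>/raw/README.md
url = get_file_content_url("README.md", "EuroSAT-RGB", "datasets")
print(url)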
eotdl/files/get_url.py CHANGED
@@ -1,8 +1,8 @@
  from ..auth import with_auth
  from ..repos import FilesAPIRepo
- from ..datasets import retrieve_dataset
- from ..models import retrieve_model
- from ..fe import retrieve_pipeline
+ from ..datasets.retrieve import retrieve_dataset
+ from ..models.retrieve import retrieve_model
+ from ..fe.retrieve import retrieve_pipeline
 
  @with_auth
  def get_file_url(filename, dataset_or_model_name, endpoint, user):
eotdl/files/ingest.py CHANGED
@@ -16,6 +16,23 @@ from ..files.metadata import Metadata
  from ..repos import FilesAPIRepo
  from ..shared import calculate_checksum
 
+ def fix_timestamp(item, field='created'):
+     if 'properties' in item.to_dict() and field in item.to_dict()['properties']:
+         created = item.to_dict()['properties'][field]
+         if isinstance(created, str):
+             # Parse and reformat the timestamp to ensure it's compatible with parquet
+             try:
+                 # Parse the timestamp string
+                 dt = datetime.fromisoformat(created.replace('Z', '+00:00'))
+                 # Reformat to ISO format without microseconds if they cause issues
+                 formatted_created = dt.strftime('%Y-%m-%dT%H:%M:%S+00:00')
+                 item.properties[field] = formatted_created
+             except Exception as e:
+                 print(f"Warning: Could not parse timestamp {created}: {e}")
+                 # Remove problematic timestamp if parsing fails
+                 item.properties.pop(field, None)
+     return item
+
  def prep_ingest_folder(
      folder,
      verbose=False,
@@ -31,7 +48,7 @@ def prep_ingest_folder(
      # ingest geometry from files (if tifs) or additional list of geometries
      # https://stac-utils.github.io/stac-geoparquet/latest/spec/stac-geoparquet-spec/#use-cases
      data = []
-     for file in files:
+     for file in tqdm(files, total=len(files), desc="Preparing files"):
          file_path = Path(file)
          if file_path.is_file():
              relative_path = os.path.relpath(file_path, catalog_path.parent)
@@ -47,14 +64,18 @@ def prep_ingest_folder(
  def prep_ingest_stac(path, logger=None): # in theory should work with a remote catalog (given URL)
      # read stac catalog
      stac_catalog = path / "catalog.json"
+     print("Reading STAC catalog...", end="", flush=True)
      catalog = pystac.Catalog.from_file(stac_catalog)
+     print("done")
      # make all items paths hredf in assets absolute
      catalog.make_all_asset_hrefs_absolute()
      # generate list of items for all collections
+     print(f"Found {len(list(catalog.get_collections()))} collections")
      items = []
      for collection in catalog.get_collections():
+         print(f"Preparing items from collection {collection.id}", flush=True)
          # iterate over items
-         for item in tqdm(collection.get_items(), desc=f"Ingesting items from collection {collection.id}"):
+         for item in tqdm(collection.get_items(), total=len(list(collection.get_items()))):
              assert isinstance(item, pystac.Item)
              # Process each asset in the item
              for asset in item.assets.values():
@@ -65,12 +86,22 @@ def prep_ingest_stac(path, logger=None): # in theory should work with a remote c
                  asset.extra_fields['size'] = file_path.stat().st_size
                  # Calculate and add checksum
                  asset.extra_fields['checksum'] = calculate_checksum(str(file_path))
+             # print(asset.to_dict())
+             # Fix timestamp format in properties.created (did this to solve errors with charter challenge... but I guess people should fix their STAC metadata)
+             item = fix_timestamp(item, 'created')
+             item = fix_timestamp(item, 'updated')
              items.append(item)
      # save parquet file
-     record_batch_reader = stac_geoparquet.arrow.parse_stac_items_to_arrow(items)
-     output_path = stac_catalog.parent / "catalog.parquet"
-     stac_geoparquet.arrow.to_parquet(record_batch_reader, output_path)
-     return output_path
+     print("Saving parquet file...", end="", flush=True)
+     try:
+         record_batch_reader = stac_geoparquet.arrow.parse_stac_items_to_arrow(items)
+         output_path = stac_catalog.parent / "catalog.parquet"
+         stac_geoparquet.arrow.to_parquet(record_batch_reader, output_path)
+         print("done")
+         return output_path
+     except Exception as e:
+         print(f"\nError saving parquet file: {e}")
+         raise e
 
  def ingest_virtual( # could work for a list of paths with minimal changes...
      path,
@@ -138,12 +169,20 @@ def ingest(path, repo, retrieve, mode, private, user):
      total_size = 0
      for row in tqdm(gdf.iterrows(), total=len(gdf), desc="Ingesting files"):
          try:
+             assets_count = len(row[1]["assets"])
              for k, v in row[1]["assets"].items():
+                 if not v: continue # skip empty assets
                  if v["href"].startswith("http"): continue
                  item_id = row[1]["id"]
+                 # Determine file name based on number of assets
+                 if assets_count == 1:
+                     file_name = item_id
+                 else:
+                     file_name = f"{item_id}_{k}"
+                 # print(f"Ingesting file {v['href']} with id {file_name}")
                  data, error = files_repo.ingest_file(
                      v["href"],
-                     item_id,
+                     file_name,
                      # Path(v["href"]).stat().st_size,
                      dataset_or_model['id'],
                      user,
@@ -151,7 +190,7 @@ def ingest(path, repo, retrieve, mode, private, user):
                  )
                  if error:
                      raise Exception(error)
-                 file_url = f"{repo.url}{mode}/{dataset_or_model['id']}/stage/{item_id}"
+                 file_url = f"{repo.url}{mode}/{dataset_or_model['id']}/stage/{file_name}"
                  gdf.loc[row[0], "assets"][k]["href"] = file_url
                  total_size += v["size"]
          except Exception as e:
@@ -172,30 +211,40 @@ def ingest(path, repo, retrieve, mode, private, user):
      total_size = 0
      for row in tqdm(gdf.iterrows(), total=len(gdf), desc="Ingesting files"):
          try:
+             item_id = row[1]["id"]
+             # check if item exists in previous versions
+             df = pd.read_parquet(
+                 path=catalog_url,
+                 filters=[('id', '=', item_id)]
+             )
+             exists = len(df) > 0
+             updated = False
+             assets_count = len(row[1]["assets"])
              for k, v in row[1]["assets"].items():
+                 if not v: continue # skip empty assets
                  if v["href"].startswith("http"): continue
-                 item_id = row[1]["id"]
-                 # check if file exists in previous versions
-                 df = pd.read_parquet(
-                     path=catalog_url,
-                     filters=[('id', '=', item_id)]
-                 )
-                 if len(df) > 0: # file exists in previous versions
+                 if assets_count == 1:
+                     file_name = item_id
+                 else:
+                     file_name = f"{item_id}_{k}"
+                 if exists:
                      if df.iloc[0]['assets'][k]["checksum"] == v["checksum"]: # file is the same
                          # still need to update the required fields
-                         file_url = f"{repo.url}{mode}/{dataset_or_model['id']}/stage/{item_id}"
+                         # file_url = f"{repo.url}{mode}/{dataset_or_model['id']}/stage/{file_name}"
+                         file_url = df.iloc[0]['assets'][k]["href"] # keep previous file url to avoid overwriting
                          gdf.loc[row[0], "assets"][k]["href"] = file_url
                          total_size += v["size"]
                          continue
-                     else: # file is different, so ingest new version but with a different id
-                         item_id = item_id + f"-{random.randint(1, 1000000)}"
-                         gdf.loc[row[0], "id"] = item_id
+                     else: # file is different, so ingest new version but with a different name
+                         file_name = file_name + f"-{random.randint(1, 1000000)}"
+                         print(file_name)
+                         updated = True
                          new_version = True
                          num_changes += 1
                  # ingest new files
                  data, error = files_repo.ingest_file(
                      v["href"],
-                     item_id, # item id, will be path in local or given id in STAC. if not unique, will overwrite previous file in storage
+                     file_name, # file_name, will be path in local or given id in STAC. if not unique, will overwrite previous file in storage
                      # Path(v["href"]).stat().st_size,
                      dataset_or_model['id'],
                      user,
@@ -205,12 +254,28 @@ def ingest(path, repo, retrieve, mode, private, user):
                  )
                  if error:
                      raise Exception(error)
-                 file_url = f"{repo.url}{mode}/{dataset_or_model['id']}/stage/{item_id}"
+                 file_url = f"{repo.url}{mode}/{dataset_or_model['id']}/stage/{file_name}"
                  gdf.loc[row[0], "assets"][k]["href"] = file_url
                  total_size += v["size"]
+             # if exists and updated:
+             # if assets_count == 1:
+             # item_id = file_name
+             # else:
+             # item_id = item_id + f"-{random.randint(1, 1000000)}"
+             # gdf.loc[row[0], "id"] = item_id
          except Exception as e:
              print(f"Error uploading asset {row[0]}: {e}")
              break
+
+     # check for deleted files
+     df = pd.read_parquet(catalog_url)
+     rows_to_remove = df[~df['id'].isin(gdf['id'])]
+     if len(rows_to_remove) > 0:
+         ids_to_remove = rows_to_remove['id'].values
+         gdf = gdf[~gdf['id'].isin(ids_to_remove)]
+         new_version = True
+         num_changes += len(ids_to_remove)
+
 
      if not new_version:
          print("No new version was created, your dataset has not changed.")
@@ -219,7 +284,6 @@ def ingest(path, repo, retrieve, mode, private, user):
          print(f"Num changes: {num_changes}")
          gdf.to_parquet(catalog_path)
          files_repo.ingest_file(str(catalog_path), f'catalog.v{new_version}.parquet', dataset_or_model['id'], user, mode)
-         # TODO: ingest README.md
          data, error = repo.complete_ingestion(dataset_or_model['id'], new_version, total_size, user)
          if error:
              raise Exception(error)
eotdl/repos/FilesAPIRepo.py CHANGED
@@ -124,4 +124,8 @@ class FilesAPIRepo(APIRepo):
          if error:
              # print("ERROR generate_presigned_url", error)
              return None
-         return data["presigned_url"]
+         return data["presigned_url"]
+
+     def generate_file_content_url(self, filename, dataset_or_model_id, user, endpoint="datasets"):
+         url = f"{self.url}{endpoint}/{dataset_or_model_id}/raw/{filename}"
+         return url
eotdl-2025.5.26.post4.dist-info/METADATA → eotdl-2025.6.27.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: eotdl
- Version: 2025.5.26.post4
+ Version: 2025.6.27
  Summary: Earth Observation Training Data Lab
  Author-email: earthpulse <it@earthpulse.es>
  License-Expression: MIT
eotdl-2025.5.26.post4.dist-info/RECORD → eotdl-2025.6.27.dist-info/RECORD RENAMED
@@ -1,7 +1,8 @@
- eotdl/__init__.py,sha256=OqGQtGs5ppIGPW8iOhNmn9CtqFmEyndumjmfb-U3Qmo,29
+ eotdl/__init__.py,sha256=kd3MhaHKQ4k7aroB3hA4dP_YE8NpO237p2jAUP0SHJk,27
  eotdl/cli.py,sha256=MgRmnBcnPtRTW_nuvtH41y7MSjmVMzr1JOt9X-oDnt4,759
- eotdl/access/__init__.py,sha256=k-zmTwB6VLoWt_AsXx9CnEKdtONBZAaC8T6vqPMPSjk,436
+ eotdl/access/__init__.py,sha256=pG-ThTZgIqcmfCPlrhZiD7uvff6i8Bf6wwpUc74zMv8,519
  eotdl/access/download.py,sha256=e5H8LUkCfIVkFxJFM5EwCMG-R5DHVSHDGLvuNM5DNc8,2815
+ eotdl/access/matches.py,sha256=aT9M7EKDDniiEqOq8ITUNffEoaCHCN59cGi2PGYPl1o,1038
  eotdl/access/search.py,sha256=1indipTfna4VAfGlKb8gkaYyHAELdHR4cm1mVIDW69s,1415
  eotdl/access/airbus/__init__.py,sha256=G_kkRS9eFjXbQ-aehmTLXeAxh7zpAxz_rgB7J_w0NRg,107
  eotdl/access/airbus/client.py,sha256=zjfgB_NTsCCIszoQesYkyLJgheKg-eTh28vbleXYxfw,12018
@@ -19,7 +20,7 @@ eotdl/auth/is_logged.py,sha256=QREuhkoDnarZoUZwCxVCNoESGb_Yukh0lJo1pXvrV9Q,115
  eotdl/auth/logout.py,sha256=P_Sp6WmVvnG3R9V1L9541KNyHFko9DtQPqAKD2vaguw,161
  eotdl/commands/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  eotdl/commands/auth.py,sha256=sSfawOpht6ntToFXDJrOu11IATV9vN03Bqyg5LNGb1s,1646
- eotdl/commands/datasets.py,sha256=udmom8yqfDSdnBHkdbgV-WFJmhZGelMzXYMGCd_PWHY,5644
+ eotdl/commands/datasets.py,sha256=d-mp7REeFMYkuwTYpJcs3ADTK6826Wk_6qsTkxgGK3A,5832
  eotdl/commands/models.py,sha256=a6ozJC0MH7CsK2-ts6sN_9EFbT6bzoaJne8v83JjTzg,4942
  eotdl/commands/pipelines.py,sha256=uvdS7NFzxG95IiKKFf5ha4TgGZsN-LO7Pucmp0GWgnw,4396
  eotdl/commands/stac.py,sha256=jgjjkGw9tIvuovyAwfEu6B4ZMoMvEj5lrj_lO-9IqrE,1444
@@ -28,26 +29,27 @@ eotdl/curation/stac/__init__.py,sha256=FMi-qzd6v1sdsqZsBRRnedccaJFXJGCPCH3uTctyL
  eotdl/curation/stac/api.py,sha256=5XW9yzSxiuML70bBbzJ0DGx0GmplmhYtgeGZg029Qjk,1428
  eotdl/curation/stac/stac.py,sha256=4f7xrh2CcXTkTs3or1UMVxiFfwtVfTqH4YwTGsbi6No,1013
  eotdl/datasets/__init__.py,sha256=3FtdIDmSaCg4P15oI5d-7DDomuUEz4E3PjK78Ofvm3g,283
- eotdl/datasets/ingest.py,sha256=ylStVIOlFJcPlqjB9G9vcKqixc5D4W8qReltHTIjujE,1798
+ eotdl/datasets/ingest.py,sha256=67i-IyC-pud_vRheSUBnCSsUEyqff4ZZN2zLwbVuK9M,1900
  eotdl/datasets/retrieve.py,sha256=vQzociVOcD1A1ZTfFC42jzgEYVcc_y_HcvlALBqpAd8,1210
  eotdl/datasets/stage.py,sha256=dbmEV5VpaM1DEHhpCWTEgYAuCNUGaW0Ghm_oP8_COEg,2168
  eotdl/datasets/update.py,sha256=QmhXZwAOqz5ysc_VpCjFFDo5tpt0EKG44G1jYeQmbkQ,706
  eotdl/fe/__init__.py,sha256=nwm7HHfh_UrCnMMYaO2vqQ6y4ziVzAf6zt_RuvDSVF4,167
- eotdl/fe/ingest.py,sha256=JkVxPkHbltmvlRP6fmPKZu5lx8HJGOp9dSxrzkCv8tQ,1489
+ eotdl/fe/ingest.py,sha256=2hMGd6ansZBr-gD9cMaeMGh7z7oZ0zZXxZOYwcFGi58,1504
  eotdl/fe/retrieve.py,sha256=xrtmYcG14xgknu9yQJEdSjJMtSMoZkDU____95Yd6YY,448
  eotdl/fe/stage.py,sha256=wxmX_uyNc2kLvU_bF2SvyBBr05sPlTA7xYq0T4pSzEo,2001
  eotdl/fe/update.py,sha256=GO9ep8tAneGQrOseKdZnrADvNs1x9uhG4Q7Y5UhcLVI,354
- eotdl/fe/openeo/__init__.py,sha256=oag2hTyLcRKF-7SGkY-ZcbuftUtABZMNtn1-8gOYDik,108
+ eotdl/fe/openeo/__init__.py,sha256=7qenXgZHxAoCi5hZ78bsOIlOe3R39PWQnWOc14ocSao,120
  eotdl/fe/openeo/advanced_patch_extraction.py,sha256=jyfkHT8Co53xey2qbWimzYeB5kLwCEG_aCyK3OUefW4,5778
- eotdl/fe/openeo/basic_point_extraction.py,sha256=pUFZ34J9Ob21V9Ur6GLO3d3HGKYU5o6vjBtF3eR3JXs,4105
- eotdl/fe/openeo/dataframe_utils.py,sha256=QYG4xkJ6LER8q9lDD8ovd_pPcBb9dX2hbg4y5ZMMfW4,7981
+ eotdl/fe/openeo/basic_point_extraction.py,sha256=cvQFjsuVaCLZzGY_mLQS4otaoBQ9mnKndfPV_FnBBTk,3977
+ eotdl/fe/openeo/dataframe_utils.py,sha256=BCmC6ug3spHm3JUwopNzJU57eYJ_K3ggo34rw3RmSWE,8135
  eotdl/fe/openeo/s3proxy_utils.py,sha256=AH8cKyf8D8qM4UYXgBgZX7I2seKxhkhNjxQJpcqtxZI,6406
  eotdl/fe/openeo/spatial_utils.py,sha256=E1FfPnzgfBMhFd4XAjRl9FEZO3QnHf27tJjkZzze75Q,1008
  eotdl/fe/openeo/temporal_utils.py,sha256=R6epJqyhNH-c-wXVg8GgV3O_1DStk9hms5Oon4lPLH8,570
- eotdl/files/__init__.py,sha256=3guFFgXq6WEQ6X1VsVnEZEzya2kpQajaDvLpNSQboVA,33
- eotdl/files/get_url.py,sha256=gcD6E6cj9tsEsF2T5WdrFKK7rBjjjcGmWtyUc8IQEBo,772
+ eotdl/files/__init__.py,sha256=9T_1VOI77caJpAVSclrcuYtoRG3kZpmy4lgwA7QlWIE,76
+ eotdl/files/get_file.py,sha256=QsvWVH2sT7ExItixvLoVENYE-qVEEx-wDboj2e9dmCc,810
+ eotdl/files/get_url.py,sha256=YQgoWtSfSXezle1gDd69t7rqcrHo2GFx1qYJP-T7gxw,799
  eotdl/files/ingest.bck,sha256=dgjZfd-ACCKradDo2B02CPahwEhFtWvnKvTm372K5eo,6185
- eotdl/files/ingest.py,sha256=RwQ3WazTm3b600QXEiHsTDApIegxAxW-Vhw9GkGQFP0,9427
+ eotdl/files/ingest.py,sha256=V6W9S9CD1XIfD1hInsDg5SjHSQvfilx2tlzD96ANY7w,11893
  eotdl/files/metadata.py,sha256=MGrIvcgNr3AMI3j9Jcdyp5Q3Jcuth8m6Z14BCDxF0xI,1405
  eotdl/models/__init__.py,sha256=b6t1Z377On1F56c-GSy7FM_nBWRLHh1Ph2R24rPFiVY,239
  eotdl/models/download.py,sha256=rRT3fG-qS3-SXfzFdqy0cuiDnOIV9Du74JCnsbbA9Ps,3475
@@ -60,7 +62,7 @@ eotdl/repos/AuthAPIRepo.py,sha256=vYCqFawe3xUm2cx4SqVXCvzl8J_sr9rs_MkipYC0bXE,95
  eotdl/repos/AuthRepo.py,sha256=jpzzhINCcDZHRCyrPDsp49h17IlXp2HvX3BB3f5cnb4,1154
  eotdl/repos/DatasetsAPIRepo.py,sha256=t9kJvR3Hp82sc908sGZKY8XlmnOrghu4dAuDaSdE54U,2210
  eotdl/repos/FEAPIRepo.py,sha256=eGB3P8UGzV5kpopC-i49AvO_NlJqLaNtZ5W3yggyE_Y,1707
- eotdl/repos/FilesAPIRepo.py,sha256=iJWT2_7TIg8gTMAihui5mBbcUWF5THh-AuuLcfyMHGk,4756
+ eotdl/repos/FilesAPIRepo.py,sha256=E65y89qn5aIB4XQzegJptNKeduXc_W5tV7RrtELV92c,4954
  eotdl/repos/ModelsAPIRepo.py,sha256=mqXABLQmxuBa0SPFUcV05J3_4s0GXSSQM3SdSsgVUJw,1495
  eotdl/repos/STACAPIRepo.py,sha256=GJIrLkgVB-donToJlgOmaJbxDmXzIuwlmCb9R2yoRIA,1387
  eotdl/repos/__init__.py,sha256=I4_friwwZ4sSQxmguYY2uBNWvpKLaHUEN1i6Zib_WWU,291
@@ -76,7 +78,7 @@ eotdl/tools/time_utils.py,sha256=JHrQ3PxXkhwor8zcOFccf26zOG9WBtb9xHb6j-Fqa9k,466
  eotdl/tools/tools.py,sha256=Tl4_v2ejkQo_zyZek8oofJwoYcdVosdOwW1C0lvWaNM,6354
  eotdl/wrappers/__init__.py,sha256=IY3DK_5LMbc5bIQFleQA9kzFbPhWuTLesJ8dwfvpkdA,32
  eotdl/wrappers/models.py,sha256=kNO4pYw9KKKmElE7bZWWHGs7FIThNUXj8XciKh_3rNw,6432
- eotdl-2025.5.26.post4.dist-info/METADATA,sha256=IXFVHA349qDAlaRGxSlsp0sPzkNuzG_ere5hzfJSKu0,3371
- eotdl-2025.5.26.post4.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- eotdl-2025.5.26.post4.dist-info/entry_points.txt,sha256=FV4dFIZ5zdWj1q1nUEEip29n3sAgbviVOizEz00gEF0,40
- eotdl-2025.5.26.post4.dist-info/RECORD,,
+ eotdl-2025.6.27.dist-info/METADATA,sha256=R8XkgbX1lYNPv-LZ44uu_dwgF0JNdNkeWYIpotK02VY,3365
+ eotdl-2025.6.27.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ eotdl-2025.6.27.dist-info/entry_points.txt,sha256=FV4dFIZ5zdWj1q1nUEEip29n3sAgbviVOizEz00gEF0,40
+ eotdl-2025.6.27.dist-info/RECORD,,