eotdl-2024.10.7-py3-none-any.whl → eotdl-2025.3.25-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56)
  1. eotdl/__init__.py +1 -1
  2. eotdl/access/search.py +0 -2
  3. eotdl/access/sentinelhub/parameters.py +1 -1
  4. eotdl/cli.py +2 -2
  5. eotdl/commands/datasets.py +28 -31
  6. eotdl/commands/models.py +27 -30
  7. eotdl/commands/stac.py +57 -0
  8. eotdl/curation/__init__.py +0 -8
  9. eotdl/curation/stac/__init__.py +1 -8
  10. eotdl/curation/stac/api.py +58 -0
  11. eotdl/curation/stac/stac.py +31 -341
  12. eotdl/datasets/__init__.py +1 -1
  13. eotdl/datasets/ingest.py +28 -159
  14. eotdl/datasets/retrieve.py +0 -9
  15. eotdl/datasets/stage.py +64 -0
  16. eotdl/files/__init__.py +0 -2
  17. eotdl/files/ingest.bck +178 -0
  18. eotdl/files/ingest.py +229 -164
  19. eotdl/{datasets → files}/metadata.py +16 -17
  20. eotdl/models/__init__.py +1 -1
  21. eotdl/models/ingest.py +28 -159
  22. eotdl/models/stage.py +60 -0
  23. eotdl/repos/APIRepo.py +1 -1
  24. eotdl/repos/DatasetsAPIRepo.py +56 -43
  25. eotdl/repos/FilesAPIRepo.py +260 -167
  26. eotdl/repos/STACAPIRepo.py +40 -0
  27. eotdl/repos/__init__.py +1 -0
  28. eotdl/tools/geo_utils.py +7 -2
  29. {eotdl-2024.10.7.dist-info → eotdl-2025.3.25.dist-info}/METADATA +5 -4
  30. eotdl-2025.3.25.dist-info/RECORD +65 -0
  31. {eotdl-2024.10.7.dist-info → eotdl-2025.3.25.dist-info}/WHEEL +1 -1
  32. eotdl/curation/stac/assets.py +0 -110
  33. eotdl/curation/stac/dataframe.py +0 -172
  34. eotdl/curation/stac/dataframe_bck.py +0 -253
  35. eotdl/curation/stac/dataframe_labeling.py +0 -63
  36. eotdl/curation/stac/extensions/__init__.py +0 -23
  37. eotdl/curation/stac/extensions/base.py +0 -30
  38. eotdl/curation/stac/extensions/dem.py +0 -18
  39. eotdl/curation/stac/extensions/eo.py +0 -117
  40. eotdl/curation/stac/extensions/label/__init__.py +0 -7
  41. eotdl/curation/stac/extensions/label/base.py +0 -136
  42. eotdl/curation/stac/extensions/label/image_name_labeler.py +0 -203
  43. eotdl/curation/stac/extensions/label/scaneo.py +0 -219
  44. eotdl/curation/stac/extensions/ml_dataset.py +0 -648
  45. eotdl/curation/stac/extensions/projection.py +0 -44
  46. eotdl/curation/stac/extensions/raster.py +0 -53
  47. eotdl/curation/stac/extensions/sar.py +0 -55
  48. eotdl/curation/stac/extent.py +0 -158
  49. eotdl/curation/stac/parsers.py +0 -61
  50. eotdl/datasets/download.py +0 -104
  51. eotdl/files/list_files.py +0 -13
  52. eotdl/models/download.py +0 -101
  53. eotdl/models/metadata.py +0 -43
  54. eotdl/wrappers/utils.py +0 -35
  55. eotdl-2024.10.7.dist-info/RECORD +0 -82
  56. {eotdl-2024.10.7.dist-info → eotdl-2025.3.25.dist-info}/entry_points.txt +0 -0
eotdl/repos/FilesAPIRepo.py CHANGED
@@ -11,190 +11,283 @@ class FilesAPIRepo(APIRepo):
      def __init__(self, url=None):
          super().__init__(url)

-     def ingest_files_batch(
-         self,
-         batch, # ziped batch of files
-         checksums,
-         dataset_or_model_id,
-         user,
-         endpoint,
-         version=None,
-     ):
-         url = self.url + f"{endpoint}/{dataset_or_model_id}/batch"
-         if version is not None:
-             url += "?version=" + str(version)
-         reponse = requests.post(
-             url,
-             files={"batch": ("batch.zip", batch)},
-             data={"checksums": checksums},
-             headers=self.generate_headers(user),
-         )
-         return self.format_response(reponse)
-
-     def add_files_batch_to_version(
-         self,
-         batch,
-         dataset_or_model_id,
-         version,
-         user,
-         endpoint,
-     ):
-         reponse = requests.post(
-             self.url + f"{endpoint}/{dataset_or_model_id}/files?version={str(version)}",
-             data={
-                 "filenames": [f["path"] for f in batch],
-                 "checksums": [f["checksum"] for f in batch],
-             },
-             headers=self.generate_headers(user),
-         )
-         return self.format_response(reponse)
-
      def ingest_file(
-         self, file, dataset_or_model_id, user, checksum, endpoint, version=None
+         self, file_path_or_bytes, file_name, dataset_or_model_id, user, endpoint, version=None
      ):
-         # TODO: ingest file URL
          url = self.url + f"{endpoint}/{dataset_or_model_id}"
          if version is not None:
              url += "?version=" + str(version)
+         # get a presigned url to upload the file directly to the bucket
          reponse = requests.post(
              url,
-             files={"file": open(file, "rb")},
-             data={"checksum": checksum},
+             json={
+                 "file_name": file_name,
+                 # "file_size": files_size,
+                 # "checksum": checksum
+             },
              headers=self.generate_headers(user),
          )
-         return self.format_response(reponse)
+         data, error = self.format_response(reponse)
+         if error:
+             raise Exception(error)
+         # ingest the file
+         error = None
+         try:
+             presigned_url = data["presigned_url"]
+             if isinstance(file_path_or_bytes, (str, bytes)):
+                 if isinstance(file_path_or_bytes, str):
+                     # Handle file path
+                     with open(file_path_or_bytes, 'rb') as f:
+                         file_data = f.read()
+                 else:
+                     # Handle bytes directly
+                     file_data = file_path_or_bytes
+                 # Send file data to presigned URL
+                 response = requests.put(presigned_url, data=file_data)
+                 response.raise_for_status()
+             else:
+                 raise TypeError("file_path_or_bytes must be either a file path string or bytes")
+         except Exception as e:
+             error = str(e)
+         return data, error

-     def retrieve_files(self, dataset_or_model_id, endpoint, version=None):
-         url = f"{self.url}{endpoint}/{dataset_or_model_id}/files"
-         if version is not None:
-             url += "?version=" + str(version)
-         response = requests.get(url)
-         return self.format_response(response)
-
-     def download_file(
+     def stage_file(
          self,
          dataset_or_model_id,
          file_name,
          user,
          path,
-         file_version,
          endpoint="datasets",
          progress=False,
      ):
-         url = self.url + f"{endpoint}/{dataset_or_model_id}/download/{file_name}"
-         if file_version is not None:
-             url += "?version=" + str(file_version)
-         return self.download_file_url(url, file_name, path, user, progress=progress)
-
-     def download_file_url(self, url, filename, path, user, progress=False):
-         headers = self.generate_headers(user)
-         path = f"{path}/{filename}"
-         for i in range(1, len(path.split("/")) - 1):
-             # print("/".join(path.split("/")[: i + 1]))
-             os.makedirs("/".join(path.split("/")[: i + 1]), exist_ok=True)
-         with requests.get(url, headers=headers, stream=True) as r:
-             r.raise_for_status()
-             total_size = int(r.headers.get("content-length", 0))
-             block_size = 1024 * 1024 * 10
-             progress = progress and total_size > 1024 * 1024 * 16
-             if progress:
-                 progress_bar = tqdm(
-                     total=total_size,
-                     unit="iB",
-                     unit_scale=True,
-                     unit_divisor=1024,
-                     position=1,
-                 )
-             with open(path, "wb") as f:
-                 for chunk in r.iter_content(block_size):
-                     if progress:
-                         progress_bar.update(len(chunk))
-                     if chunk:
-                         f.write(chunk)
-             if progress:
-                 progress_bar.close()
-         return path
-
-     def prepare_large_upload(
-         self, filename, dataset_or_model_id, checksum, user, endpoint
-     ):
-         response = requests.post(
-             self.url + f"{endpoint}/{dataset_or_model_id}/uploadId",
-             json={"filname": filename, "checksum": checksum},
-             headers=self.generate_headers(user),
-         )
-         if response.status_code != 200:
-             raise Exception(response.json()["detail"])
-         data = response.json()
-         upload_id, parts = (
-             data["upload_id"],
-             data["parts"] if "parts" in data else [],
-         )
-         return upload_id, parts
-
-     def get_chunk_size(self, content_size):
-         # adapt chunk size to content size to avoid S3 limits (10000 parts, 500MB per part, 5TB per object)
-         chunk_size = 1024 * 1024 * 10 # 10 MB (up to 100 GB, 10000 parts)
-         if content_size >= 1024 * 1024 * 1024 * 100: # 100 GB
-             chunk_size = 1024 * 1024 * 100 # 100 MB (up to 1 TB, 10000 parts)
-         elif content_size >= 1024 * 1024 * 1024 * 1000: # 1 TB
-             chunk_size = 1024 * 1024 * 500 # 0.5 GB (up to 5 TB, 10000 parts)
-         return chunk_size
-
-     def read_in_chunks(self, file_object, CHUNK_SIZE):
-         while True:
-             data = file_object.read(CHUNK_SIZE)
-             if not data:
-                 break
-             yield data
-
-     def ingest_large_file(
-         self, file_path, files_size, upload_id, user, parts, endpoint
+         url = self.url + f"{endpoint}/{dataset_or_model_id}/stage/{file_name}"
+         # if file_version is not None:
+         #     url += "?version=" + str(file_version)
+         return self.stage_file_url(url, path, user)
+
+
+     def stage_file_url(
+         self,
+         url,
+         path,
+         user,
      ):
-         print(endpoint)
-         # content_path = os.path.abspath(file)
-         # content_size = os.stat(content_path).st_size
-         chunk_size = self.get_chunk_size(files_size)
-         total_chunks = files_size // chunk_size
-         # upload chunks sequentially
-         pbar = tqdm(
-             self.read_in_chunks(open(file_path, "rb"), chunk_size),
-             total=total_chunks,
-         )
-         index = 0
-         for chunk in pbar:
-             part = index // chunk_size + 1
-             offset = index + len(chunk)
-             index = offset
-             if part not in parts:
-                 checksum = hashlib.md5(chunk).hexdigest()
-                 response = requests.post(
-                     f"{self.url}{endpoint}/chunk/{upload_id}",
-                     files={"file": chunk},
-                     data={"part_number": part, "checksum": checksum},
-                     headers=self.generate_headers(user),
-                 )
-                 if response.status_code != 200:
-                     raise Exception(response.json()["detail"])
-             pbar.set_description(
-                 "{:.2f}/{:.2f} MB".format(
-                     offset / 1024 / 1024, files_size / 1024 / 1024
-                 )
-             )
-         pbar.close()
-         return
-
-     def complete_upload(self, user, upload_id, version, endpoint):
-         r = requests.post(
-             f"{self.url}{endpoint}/complete/{upload_id}?version={version}",
-             headers=self.generate_headers(user),
-         )
-         return self.format_response(r)
+         if '/stage/' in url: # asset is in EOTDL (can do better...)
+             file_name = url.split("/stage/")[-1]
+             reponse = requests.get(url, headers=self.generate_headers(user))
+             data, error = self.format_response(reponse)
+             if error:
+                 raise Exception(error)
+             presigned_url = data["presigned_url"]
+         else:
+             file_name = url.split("//")[-1]
+             presigned_url = url
+         file_path = f"{path}/{file_name}"
+         for i in range(1, len(file_path.split("/")) - 1):
+             os.makedirs("/".join(file_path.split("/")[: i + 1]), exist_ok=True)
+         try:
+             response = requests.get(presigned_url)
+             response.raise_for_status() # This will raise an HTTPError for 4XX and 5XX status codes
+             with open(file_path, 'wb') as f:
+                 f.write(response.content)
+         except requests.exceptions.HTTPError as e:
+             raise Exception(f"Failed to stage file: {str(e)}")
+         except Exception as e:
+             raise Exception(f"Unexpected error while staging file: {str(e)}")
+         return file_path

-     def get_file_stream(self, dataset_id, filename, user, version=None):
-         url = self.url + f"datasets/{dataset_id}/download/{filename}"
-         if version is not None:
-             url += "?version=" + str(version)
-         headers = self.generate_headers(user)
-         response = requests.get(url, headers=headers, stream=True)
-         return BytesIO(response.content)
+     def generate_presigned_url(self, filename, dataset_or_model_id, user, endpoint="datasets"):
+         url = f"{self.url}{endpoint}/{dataset_or_model_id}/stage/{filename}"
+         reponse = requests.get(url, headers=self.generate_headers(user))
+         data, error = self.format_response(reponse)
+         if error:
+             print("ERROR generate_presigned_url", error)
+             return None
+         return data["presigned_url"]
+
+     # can we download large files?
+
+     # with requests.get(presigned_url, headers=headers, stream=True) as r:
+     #     r.raise_for_status()
+     #     total_size = int(r.headers.get("content-length", 0))
+     #     block_size = 1024 * 1024 * 10
+     #     progress = progress and total_size > 1024 * 1024 * 16
+     #     if progress:
+     #         progress_bar = tqdm(
+     #             total=total_size,
+     #             unit="iB",
+     #             unit_scale=True,
+     #             unit_divisor=1024,
+     #             position=1,
+     #         )
+     #     with open(path, "wb") as f:
+     #         for chunk in r.iter_content(block_size):
+     #             if progress:
+     #                 progress_bar.update(len(chunk))
+     #             if chunk:
+     #                 f.write(chunk)
+     #     if progress:
+     #         progress_bar.close()
+     #     return path
+
+
+
+     # def ingest_files_batch(
+     #     self,
+     #     batch, # ziped batch of files
+     #     checksums,
+     #     dataset_or_model_id,
+     #     user,
+     #     endpoint,
+     #     version=None,
+     # ):
+     #     url = self.url + f"{endpoint}/{dataset_or_model_id}/batch"
+     #     if version is not None:
+     #         url += "?version=" + str(version)
+     #     reponse = requests.post(
+     #         url,
+     #         files={"batch": ("batch.zip", batch)},
+     #         data={"checksums": checksums},
+     #         headers=self.generate_headers(user),
+     #     )
+     #     return self.format_response(reponse)
+
+     # def add_files_batch_to_version(
+     #     self,
+     #     batch,
+     #     dataset_or_model_id,
+     #     version,
+     #     user,
+     #     endpoint,
+     # ):
+     #     reponse = requests.post(
+     #         self.url + f"{endpoint}/{dataset_or_model_id}/files?version={str(version)}",
+     #         data={
+     #             "filenames": [f["path"] for f in batch],
+     #             "checksums": [f["checksum"] for f in batch],
+     #         },
+     #         headers=self.generate_headers(user),
+     #     )
+     #     return self.format_response(reponse)
+
+     # def retrieve_files(self, dataset_or_model_id, endpoint, version=None):
+     #     url = f"{self.url}{endpoint}/{dataset_or_model_id}/files"
+     #     if version is not None:
+     #         url += "?version=" + str(version)
+     #     response = requests.get(url)
+     #     return self.format_response(response)
+
+
+
+     # def download_file_url(self, url, filename, path, user, progress=False):
+     #     headers = self.generate_headers(user)
+     #     path = f"{path}/{filename}"
+     #     for i in range(1, len(path.split("/")) - 1):
+     #         # print("/".join(path.split("/")[: i + 1]))
+     #         os.makedirs("/".join(path.split("/")[: i + 1]), exist_ok=True)
+     #     with requests.get(url, headers=headers, stream=True) as r:
+     #         r.raise_for_status()
+     #         total_size = int(r.headers.get("content-length", 0))
+     #         block_size = 1024 * 1024 * 10
+     #         progress = progress and total_size > 1024 * 1024 * 16
+     #         if progress:
+     #             progress_bar = tqdm(
+     #                 total=total_size,
+     #                 unit="iB",
+     #                 unit_scale=True,
+     #                 unit_divisor=1024,
+     #                 position=1,
+     #             )
+     #         with open(path, "wb") as f:
+     #             for chunk in r.iter_content(block_size):
+     #                 if progress:
+     #                     progress_bar.update(len(chunk))
+     #                 if chunk:
+     #                     f.write(chunk)
+     #         if progress:
+     #             progress_bar.close()
+     #     return path
+
+     # def prepare_large_upload(
+     #     self, filename, dataset_or_model_id, checksum, user, endpoint
+     # ):
+     #     response = requests.post(
+     #         self.url + f"{endpoint}/{dataset_or_model_id}/uploadId",
+     #         json={"filname": filename, "checksum": checksum},
+     #         headers=self.generate_headers(user),
+     #     )
+     #     if response.status_code != 200:
+     #         raise Exception(response.json()["detail"])
+     #     data = response.json()
+     #     upload_id, parts = (
+     #         data["upload_id"],
+     #         data["parts"] if "parts" in data else [],
+     #     )
+     #     return upload_id, parts
+
+     # def get_chunk_size(self, content_size):
+     #     # adapt chunk size to content size to avoid S3 limits (10000 parts, 500MB per part, 5TB per object)
+     #     chunk_size = 1024 * 1024 * 10 # 10 MB (up to 100 GB, 10000 parts)
+     #     if content_size >= 1024 * 1024 * 1024 * 100: # 100 GB
+     #         chunk_size = 1024 * 1024 * 100 # 100 MB (up to 1 TB, 10000 parts)
+     #     elif content_size >= 1024 * 1024 * 1024 * 1000: # 1 TB
+     #         chunk_size = 1024 * 1024 * 500 # 0.5 GB (up to 5 TB, 10000 parts)
+     #     return chunk_size
+
+     # def read_in_chunks(self, file_object, CHUNK_SIZE):
+     #     while True:
+     #         data = file_object.read(CHUNK_SIZE)
+     #         if not data:
+     #             break
+     #         yield data
+
+     # def ingest_large_file(
+     #     self, file_path, files_size, upload_id, user, parts, endpoint
+     # ):
+     #     print(endpoint)
+     #     # content_path = os.path.abspath(file)
+     #     # content_size = os.stat(content_path).st_size
+     #     chunk_size = self.get_chunk_size(files_size)
+     #     total_chunks = files_size // chunk_size
+     #     # upload chunks sequentially
+     #     pbar = tqdm(
+     #         self.read_in_chunks(open(file_path, "rb"), chunk_size),
+     #         total=total_chunks,
+     #     )
+     #     index = 0
+     #     for chunk in pbar:
+     #         part = index // chunk_size + 1
+     #         offset = index + len(chunk)
+     #         index = offset
+     #         if part not in parts:
+     #             checksum = hashlib.md5(chunk).hexdigest()
+     #             response = requests.post(
+     #                 f"{self.url}{endpoint}/chunk/{upload_id}",
+     #                 files={"file": chunk},
+     #                 data={"part_number": part, "checksum": checksum},
+     #                 headers=self.generate_headers(user),
+     #             )
+     #             if response.status_code != 200:
+     #                 raise Exception(response.json()["detail"])
+     #         pbar.set_description(
+     #             "{:.2f}/{:.2f} MB".format(
+     #                 offset / 1024 / 1024, files_size / 1024 / 1024
+     #             )
+     #         )
+     #     pbar.close()
+     #     return
+
+     # def complete_upload(self, user, upload_id, version, endpoint):
+     #     r = requests.post(
+     #         f"{self.url}{endpoint}/complete/{upload_id}?version={version}",
+     #         headers=self.generate_headers(user),
+     #     )
+     #     return self.format_response(r)
+
+     # def get_file_stream(self, dataset_id, filename, user, version=None):
+     #     url = self.url + f"datasets/{dataset_id}/download/{filename}"
+     #     if version is not None:
+     #         url += "?version=" + str(version)
+     #     headers = self.generate_headers(user)
+     #     response = requests.get(url, headers=headers, stream=True)
+     #     return BytesIO(response.content)
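
The hunk above replaces the old chunked/multipart upload and streaming download helpers with a presigned-URL flow: ingest_file asks the API for a presigned URL and PUTs the bytes straight to object storage, while stage_file and stage_file_url resolve a presigned GET URL and write the file locally. A minimal calling sketch follows; the API URL, dataset id, token and file names are placeholders rather than values from this diff, and the exact shape of the user dict must match whatever generate_headers expects elsewhere in the library.

# Hypothetical driver for the new presigned-URL flow; all ids, paths and tokens are placeholders.
import os

from eotdl.repos import FilesAPIRepo

repo = FilesAPIRepo(url=os.getenv("EOTDL_API_URL"))  # assumed env var; None falls back to the library default
user = {"id_token": "<auth-token>"}                   # placeholder; must match what generate_headers() expects
dataset_id = "<dataset-id>"

# Upload: the API returns a presigned URL and the file bytes are PUT directly to the bucket.
data, error = repo.ingest_file("samples/scene.tif", "scene.tif", dataset_id, user, endpoint="datasets")
if error:
    raise RuntimeError(f"ingest failed: {error}")

# Download ("stage"): the API resolves a presigned GET URL and the file is written under `path`.
local_path = repo.stage_file(dataset_id, "scene.tif", user, path="data", endpoint="datasets")
print("staged to", local_path)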
eotdl/repos/STACAPIRepo.py ADDED
@@ -0,0 +1,40 @@
+ import requests
+ import geopandas as gpd
+
+ from ..repos import APIRepo
+
+
+ class STACAPIRepo(APIRepo):
+     def __init__(self, url=None):
+         super().__init__(url)
+
+     def status(self):
+         response = requests.get(self.url + "stac")
+         return self.format_response(response)
+
+     def collections(self):
+         response = requests.get(self.url + "stac/collections")
+         return self.format_response(response)
+
+     def collection(self, collection_id):
+         response = requests.get(self.url + f"stac/collections/{collection_id}")
+         return self.format_response(response)
+
+     def items(self, collection_id):
+         response = requests.get(self.url + f"stac/collections/{collection_id}/items")
+         return self.format_response(response)
+
+     def item(self, collection_id, item_id):
+         response = requests.get(self.url + f"stac/collections/{collection_id}/items/{item_id}")
+         return self.format_response(response)
+
+     def search(self, collection_id, query):
+         body = {"collection_id": collection_id}
+         if query is not None:
+             body["query"] = query
+         response = requests.post(self.url + f"stac/search", json=body)
+         return self.format_response(response)
+
+     def search_columns(self, collection_id):
+         response = requests.get(self.url + f"stac/search?collection={collection_id}")
+         return self.format_response(response)
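
The new STACAPIRepo shown above is a thin client over the EOTDL STAC endpoints; every method returns the (data, error) tuple produced by format_response. A hedged usage sketch, with the collection id and query string as placeholders:

# Hypothetical exploration of the STAC endpoints; collection id and query are placeholders.
from eotdl.repos import STACAPIRepo

repo = STACAPIRepo()                            # defaults to the library's configured API URL

status, error = repo.status()                   # GET  <api>/stac
collections, error = repo.collections()         # GET  <api>/stac/collections
items, error = repo.items("<collection-id>")    # GET  <api>/stac/collections/<id>/items
results, error = repo.search("<collection-id>", query="<backend-specific query>")  # POST <api>/stac/search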
eotdl/repos/__init__.py CHANGED
@@ -4,3 +4,4 @@ from .AuthAPIRepo import AuthAPIRepo
  from .DatasetsAPIRepo import DatasetsAPIRepo
  from .FilesAPIRepo import FilesAPIRepo
  from .ModelsAPIRepo import ModelsAPIRepo
+ from .STACAPIRepo import STACAPIRepo
eotdl/tools/geo_utils.py CHANGED
@@ -127,6 +127,11 @@ def bbox_from_centroid(
      width_m = width * pixel_size
      heigth_m = height * pixel_size

+     # Initialise the transformers
+     utm_crs = CRS.get_utm_from_wgs84(y, x).ogc_string()
+     from_4326_transformer = Transformer.from_crs("EPSG:4326", utm_crs)
+     to_4326_transformer = Transformer.from_crs(utm_crs, "EPSG:4326")
+
      # Transform the centroid coordinates to meters
      centroid_m = from_4326_transformer.transform(x, y)

@@ -137,8 +142,8 @@
      max_y = centroid_m[1] + heigth_m / 2

      # Convert the bounding box coordinates back to degrees
-     min_x, min_y = from_3857_transformer.transform(min_x, min_y)
-     max_x, max_y = from_3857_transformer.transform(max_x, max_y)
+     min_x, min_y = to_4326_transformer.transform(min_x, min_y)
+     max_x, max_y = to_4326_transformer.transform(max_x, max_y)

      return [min_y, min_x, max_y, max_x]

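The geo_utils change fixes bbox_from_centroid: the function now builds its own WGS84/UTM transformers instead of referencing an undefined from_3857_transformer, so the bounding box is converted back to EPSG:4326 through the correct UTM zone. Below is a stand-alone sketch of the same round-trip pattern, using pyproj only and a hard-coded UTM zone as an assumption (the library picks the zone with sentinelhub's CRS.get_utm_from_wgs84).

# Illustration of the corrected transform pattern; centroid, pixel size and UTM zone are assumptions.
from pyproj import Transformer

lat, lon = 41.39, 2.17                      # sample centroid (falls in UTM zone 31N)
width_m, height_m = 512 * 10, 512 * 10      # 512 px at 10 m/px

to_utm = Transformer.from_crs("EPSG:4326", "EPSG:32631", always_xy=True)
to_wgs84 = Transformer.from_crs("EPSG:32631", "EPSG:4326", always_xy=True)

cx, cy = to_utm.transform(lon, lat)                                     # centroid in metres
min_x, min_y = to_wgs84.transform(cx - width_m / 2, cy - height_m / 2)  # lower-left corner back to degrees
max_x, max_y = to_wgs84.transform(cx + width_m / 2, cy + height_m / 2)  # upper-right corner back to degrees

print([min_y, min_x, max_y, max_x])                                     # bbox in the order the library returns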
{eotdl-2024.10.7.dist-info → eotdl-2025.3.25.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
- Metadata-Version: 2.1
+ Metadata-Version: 2.3
  Name: eotdl
- Version: 2024.10.7
+ Version: 2025.3.25
  Summary: Earth Observation Training Data Lab
  License: MIT
  Author: EarthPulse
@@ -13,6 +13,7 @@ Classifier: Programming Language :: Python :: 3.9
  Classifier: Programming Language :: Python :: 3.10
  Classifier: Programming Language :: Python :: 3.11
  Classifier: Programming Language :: Python :: 3.12
+ Classifier: Programming Language :: Python :: 3.13
  Requires-Dist: black (>=23.10.1,<24.0.0)
  Requires-Dist: geomet (>=1.0.0,<2.0.0)
  Requires-Dist: geopandas (>=0.13.2,<0.14.0)
@@ -49,10 +50,10 @@ Description-Content-Type: text/markdown

  This is the main library and CLI for the **Earth Observation Training Data Lab** (EOTDL), a complete environment that allows you, among other things, to:

- - Explore and download Training Datasets (TDS) for Earth Observation (EO) applications.
+ - Explore and stage Training Datasets (TDS) for Earth Observation (EO) applications.
  - Create and upload your own TDS by combining and annotating EO data from different sources.
  - Train Machine Learning (ML) models using the hosted TDS in the cloud with multi-GPU machines.
- - Explore and download pre-trianed ML models for EO applications.
+ - Explore and stage pre-trianed ML models for EO applications.

  In our blog you will find tutorials to learn how leverage the EOTDL to create and use TDS and ML models for your own EO applications.

eotdl-2025.3.25.dist-info/RECORD ADDED
@@ -0,0 +1,65 @@
+ eotdl/__init__.py,sha256=5OBv4eVG7bKLTl5AtMeoKzr4tllf7-R_Vp-xqXk87VQ,27
+ eotdl/access/__init__.py,sha256=jbyjD7BRGJURlTNmtcbBBhw3Xk4EiZvkqmEykM-bJ1k,231
+ eotdl/access/airbus/__init__.py,sha256=G_kkRS9eFjXbQ-aehmTLXeAxh7zpAxz_rgB7J_w0NRg,107
+ eotdl/access/airbus/client.py,sha256=zjfgB_NTsCCIszoQesYkyLJgheKg-eTh28vbleXYxfw,12018
+ eotdl/access/airbus/parameters.py,sha256=Z8XIrxG5wAOuOoH-fkdKfdNMEMLFp6PaxJN7v4MefMI,1009
+ eotdl/access/airbus/utils.py,sha256=oh_N1Rn4fhcvUgNPpH2QzVvpe4bA0gqRgNguzRVqUps,652
+ eotdl/access/download.py,sha256=DgemJKafNOlCUVW8OxpSP4br9ij5F1iSrSD-x0B5qFU,1845
+ eotdl/access/search.py,sha256=JW4MnM3xbXxvsaNCFkRKxPhxhNKJgZAutE2wna6qUpo,631
+ eotdl/access/sentinelhub/__init__.py,sha256=YpvaUBTRXM26WrXipo51ZUBCDv9WjRIdT8l1Pklpt_M,238
+ eotdl/access/sentinelhub/client.py,sha256=g40avqlUpIa-WLjD7tK8CL8_SohBA2v3m8NZ0KbIFxc,4098
+ eotdl/access/sentinelhub/evalscripts.py,sha256=m6cnZ6ryXHgdH2B7RDVSlDHXWfvKi7HMGkTHXEcJsTw,4142
+ eotdl/access/sentinelhub/parameters.py,sha256=SEal7mCPkADc7lhQL-63t2h5-XCssYpGMvK5Eo3etFU,2078
+ eotdl/access/sentinelhub/utils.py,sha256=X9Q1YvErBdMsRKszXyaOaG6ZMvPdM2Nl_0SH-dWSFo0,3560
+ eotdl/auth/__init__.py,sha256=OuGNfJQ-8Kymn4zIywlHQfImEO8DJMJIwOwTQm-u_dc,99
+ eotdl/auth/auth.py,sha256=EjbVFREA2H0sjFJhVqjFZrwjKPzxRJ2x83MTjizpRBs,2029
+ eotdl/auth/errors.py,sha256=E1lv3Igk--J-SOgNH18i8Xx9bXrrMyBSHKt_CAUmGPo,308
+ eotdl/auth/is_logged.py,sha256=QREuhkoDnarZoUZwCxVCNoESGb_Yukh0lJo1pXvrV9Q,115
+ eotdl/auth/logout.py,sha256=P_Sp6WmVvnG3R9V1L9541KNyHFko9DtQPqAKD2vaguw,161
+ eotdl/cli.py,sha256=1wtNmiuqjwDB1Me-eSio--dnOabrbdpMiO9dQoIbOoc,702
+ eotdl/commands/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ eotdl/commands/auth.py,sha256=WzA0aFGRoscy7fPKQTxiphBc0LJztJxBBl7rjDBRVfI,1544
+ eotdl/commands/datasets.py,sha256=rFdimg_AOp_sf8N1dPHOfwHoqiGBXPOY7bHhg3022v0,5208
+ eotdl/commands/models.py,sha256=Me1xyCHHO9Wy-Nd_p4KY09l6pYwUPrBbm6hsYwqHQKY,4864
+ eotdl/commands/stac.py,sha256=Nt7WDzANgcaxJYwr-5XOv887jLrYXlut5dHbh5rKNPU,1440
+ eotdl/curation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ eotdl/curation/stac/__init__.py,sha256=FMi-qzd6v1sdsqZsBRRnedccaJFXJGCPCH3uTctyLYU,37
+ eotdl/curation/stac/api.py,sha256=wAn_oDuvGJOjHOSauLIiXZ9ym8n4jyk-OTIGvCcRAzo,1424
+ eotdl/curation/stac/stac.py,sha256=eap9aqdLepEdhkoY7PCD1VD-4KwaU7F4rLMeIo7c7F4,1011
+ eotdl/datasets/__init__.py,sha256=IKEcRM6TFgEU5mnT-VrR90TGJgyDp1nby1qrdshy9wk,170
+ eotdl/datasets/ingest.py,sha256=7hFkCZuN2fNbiJ0hX0bqVgx2k5vVtxwDqjmtquR7CFs,1008
+ eotdl/datasets/retrieve.py,sha256=dhNbBJu0vE0l-LsGQNQF5Vc_WzZDRbXPzvd66GNlV6U,691
+ eotdl/datasets/stage.py,sha256=7FYAG_k41WYyXilW8gEZ7f77DUkMkqAeHNAqDB6VrY8,2004
+ eotdl/datasets/update.py,sha256=x-rpfxnavn9X-7QYkFMGtbn1b3bKmAZydOeS7Tjr5AQ,386
+ eotdl/files/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ eotdl/files/ingest.bck,sha256=dgjZfd-ACCKradDo2B02CPahwEhFtWvnKvTm372K5eo,6185
+ eotdl/files/ingest.py,sha256=2wEtp6x4jToUkFXW6qREg-C1KzmxYDGGPKGQr8B3LRk,8915
+ eotdl/files/metadata.py,sha256=C-NDr-zjM58fP8QcHB1N1QyLRUeYyMbT6wPPnxGk8LI,1370
+ eotdl/models/__init__.py,sha256=5mriPl64bzRwcY4O4Bnjt2WeKpX4ab5yXZ3uKzapJfA,158
+ eotdl/models/ingest.py,sha256=OnXpYHABJHHIGnu-qSunCb5Usg8pXZFhrHbCVZJx7lk,981
+ eotdl/models/retrieve.py,sha256=-Ij7dT4J1p7MW4n13OlPB9OW4tBaBXPwk9dW8IuCZPc,664
+ eotdl/models/stage.py,sha256=nlLL5kYOMJPDPR3orhm5ZOZnZrqw0Q37nV6gWylBIbE,1805
+ eotdl/models/update.py,sha256=4FWeD95cXvRpefRjw3Foqb30e30otxqWUZ6nQM9cbmM,374
+ eotdl/repos/APIRepo.py,sha256=fcMpVbatfJgAq12bGWM828n8UDOixBbf5ueleB_Hrc4,791
+ eotdl/repos/AuthAPIRepo.py,sha256=vYCqFawe3xUm2cx4SqVXCvzl8J_sr9rs_MkipYC0bXE,957
+ eotdl/repos/AuthRepo.py,sha256=jpzzhINCcDZHRCyrPDsp49h17IlXp2HvX3BB3f5cnb4,1154
+ eotdl/repos/DatasetsAPIRepo.py,sha256=Yy22IoiASPmca93r4Rt5lzq28TFQkq3aOl_M4u8VJw8,3236
+ eotdl/repos/FilesAPIRepo.py,sha256=le8Xzt1zgB3MmLYoCN46zxcFLQvJXJoOeXQULj1DkCI,11079
+ eotdl/repos/ModelsAPIRepo.py,sha256=79euf5WsfUxG5KSIGhKT8T7kSl-NtISwxvqHnck-bq0,2616
+ eotdl/repos/STACAPIRepo.py,sha256=YtLd-Wl2mOM4MtT7nCFHd26oeNleq9POKajJuhEt-74,1407
+ eotdl/repos/__init__.py,sha256=GIzk62681dvNzYgVzvJgrMzVRhrep4-kJH6lTOtfnT8,258
+ eotdl/shared/__init__.py,sha256=mF7doJC8Z5eTPmB01UQvPivThZac32DRY33T6qshXfg,41
+ eotdl/shared/checksum.py,sha256=4IB6N9jRO0chMDNJzpdnFDhC9wcFF9bO5oHq2HodcHw,479
+ eotdl/tools/__init__.py,sha256=_p3n2dw3ulwyr1OlVw5d_jMV64cNYfajQMUbzFfvIpU,178
+ eotdl/tools/geo_utils.py,sha256=JKHUAnqkwiIrvh5voDclWAW-i57qVqH2FUjeOt1TQf4,7547
+ eotdl/tools/metadata.py,sha256=RvNmoMdfEKoo-DzhEAqL-f9ZCjIe_bsdHQwACMk6w1E,1664
+ eotdl/tools/paths.py,sha256=yWhOtVxX4NxrDrrBX2fuye5N1mAqrxXFy_eA7dffd84,1152
+ eotdl/tools/stac.py,sha256=ovXdrPm4Sn9AAJmrP88WnxDmq2Ut-xPoscjphxz3Iyo,5763
+ eotdl/tools/time_utils.py,sha256=qJ3-rk1I7ne722SLfAP6-59kahQ0vLQqIf9VpOi0Kpg,4691
+ eotdl/tools/tools.py,sha256=Tl4_v2ejkQo_zyZek8oofJwoYcdVosdOwW1C0lvWaNM,6354
+ eotdl/wrappers/__init__.py,sha256=IY3DK_5LMbc5bIQFleQA9kzFbPhWuTLesJ8dwfvpkdA,32
+ eotdl/wrappers/models.py,sha256=kNO4pYw9KKKmElE7bZWWHGs7FIThNUXj8XciKh_3rNw,6432
+ eotdl-2025.3.25.dist-info/METADATA,sha256=zGb2kpfJo_dacJpUd4_GGaznMSJ3QamvXlbbqGP-Iak,4189
+ eotdl-2025.3.25.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
+ eotdl-2025.3.25.dist-info/entry_points.txt,sha256=s6sfxUfRrSX2IP2UbrzTFTvRCtLgw3_OKcHlOKf_5F8,39
+ eotdl-2025.3.25.dist-info/RECORD,,
{eotdl-2024.10.7.dist-info → eotdl-2025.3.25.dist-info}/WHEEL CHANGED
@@ -1,4 +1,4 @@
  Wheel-Version: 1.0
- Generator: poetry-core 1.9.0
+ Generator: poetry-core 2.1.1
  Root-Is-Purelib: true
  Tag: py3-none-any