eotdl-2025.3.25-py3-none-any.whl → eotdl-2025.4.2.post2-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as they appear in their respective public registries, and is provided for informational purposes only.
eotdl/repos/DatasetsAPIRepo.py CHANGED
@@ -1,9 +1,7 @@
 import requests
-import geopandas as gpd
 
 from ..repos import APIRepo
 
-
 class DatasetsAPIRepo(APIRepo):
     def __init__(self, url=None):
         super().__init__(url)
@@ -43,51 +41,3 @@ class DatasetsAPIRepo(APIRepo):
             headers=self.generate_headers(user),
         )
         return self.format_response(response)
-
-
-    # def create_version(self, dataset_id, user):
-    #     response = requests.post(
-    #         self.url + "datasets/version/" + dataset_id,
-    #         headers=self.generate_headers(user),
-    #     )
-    #     return self.format_response(response)
-
-    # def create_stac_dataset(self, name, user):
-    #     response = requests.post(
-    #         self.url + "datasets/stac",
-    #         json={"name": name},
-    #         headers=self.generate_headers(user),
-    #     )
-    #     return self.format_response(response)
-
-    # def ingest_stac(self, stac_json, dataset_id, user):
-    #     response = requests.put(
-    #         self.url + f"datasets/stac/{dataset_id}",
-    #         json={"stac": stac_json},
-    #         headers=self.generate_headers(user),
-    #     )
-    #     return self.format_response(response)
-
-    # def download_stac(self, dataset_id, user):
-    #     url = self.url + "datasets/" + dataset_id + "/download"
-    #     headers = self.generate_headers(user)
-    #     response = requests.get(url, headers=headers)
-    #     if response.status_code != 200:
-    #         return None, response.json()["detail"]
-    #     return gpd.GeoDataFrame.from_features(response.json()["features"]), None
-
-    # def update_dataset(
-    #     self, dataset_id, authors, source, license, thumbnail, content, user
-    # ):
-    #     response = requests.put(
-    #         self.url + f"datasets/{dataset_id}",
-    #         json={
-    #             "authors": authors,
-    #             "source": source,
-    #             "license": license,
-    #             "thumbnail": thumbnail,
-    #             "description": content,
-    #         },
-    #         headers=self.generate_headers(user),
-    #     )
-    #     return self.format_response(response)
eotdl/repos/FilesAPIRepo.py CHANGED
@@ -1,8 +1,5 @@
 import requests
 import os
-from tqdm import tqdm
-import hashlib
-from io import BytesIO
 
 from ..repos import APIRepo
 
@@ -101,193 +98,6 @@ class FilesAPIRepo(APIRepo):
         reponse = requests.get(url, headers=self.generate_headers(user))
         data, error = self.format_response(reponse)
         if error:
-            print("ERROR generate_presigned_url", error)
+            # print("ERROR generate_presigned_url", error)
             return None
-        return data["presigned_url"]
-
-    # can we download large files?
-
-    # with requests.get(presigned_url, headers=headers, stream=True) as r:
-    #     r.raise_for_status()
-    #     total_size = int(r.headers.get("content-length", 0))
-    #     block_size = 1024 * 1024 * 10
-    #     progress = progress and total_size > 1024 * 1024 * 16
-    #     if progress:
-    #         progress_bar = tqdm(
-    #             total=total_size,
-    #             unit="iB",
-    #             unit_scale=True,
-    #             unit_divisor=1024,
-    #             position=1,
-    #         )
-    #     with open(path, "wb") as f:
-    #         for chunk in r.iter_content(block_size):
-    #             if progress:
-    #                 progress_bar.update(len(chunk))
-    #             if chunk:
-    #                 f.write(chunk)
-    #     if progress:
-    #         progress_bar.close()
-    #     return path
-
-
-
-    # def ingest_files_batch(
-    #     self,
-    #     batch,  # ziped batch of files
-    #     checksums,
-    #     dataset_or_model_id,
-    #     user,
-    #     endpoint,
-    #     version=None,
-    # ):
-    #     url = self.url + f"{endpoint}/{dataset_or_model_id}/batch"
-    #     if version is not None:
-    #         url += "?version=" + str(version)
-    #     reponse = requests.post(
-    #         url,
-    #         files={"batch": ("batch.zip", batch)},
-    #         data={"checksums": checksums},
-    #         headers=self.generate_headers(user),
-    #     )
-    #     return self.format_response(reponse)
-
-    # def add_files_batch_to_version(
-    #     self,
-    #     batch,
-    #     dataset_or_model_id,
-    #     version,
-    #     user,
-    #     endpoint,
-    # ):
-    #     reponse = requests.post(
-    #         self.url + f"{endpoint}/{dataset_or_model_id}/files?version={str(version)}",
-    #         data={
-    #             "filenames": [f["path"] for f in batch],
-    #             "checksums": [f["checksum"] for f in batch],
-    #         },
-    #         headers=self.generate_headers(user),
-    #     )
-    #     return self.format_response(reponse)
-
-    # def retrieve_files(self, dataset_or_model_id, endpoint, version=None):
-    #     url = f"{self.url}{endpoint}/{dataset_or_model_id}/files"
-    #     if version is not None:
-    #         url += "?version=" + str(version)
-    #     response = requests.get(url)
-    #     return self.format_response(response)
-
-
-
-    # def download_file_url(self, url, filename, path, user, progress=False):
-    #     headers = self.generate_headers(user)
-    #     path = f"{path}/{filename}"
-    #     for i in range(1, len(path.split("/")) - 1):
-    #         # print("/".join(path.split("/")[: i + 1]))
-    #         os.makedirs("/".join(path.split("/")[: i + 1]), exist_ok=True)
-    #     with requests.get(url, headers=headers, stream=True) as r:
-    #         r.raise_for_status()
-    #         total_size = int(r.headers.get("content-length", 0))
-    #         block_size = 1024 * 1024 * 10
-    #         progress = progress and total_size > 1024 * 1024 * 16
-    #         if progress:
-    #             progress_bar = tqdm(
-    #                 total=total_size,
-    #                 unit="iB",
-    #                 unit_scale=True,
-    #                 unit_divisor=1024,
-    #                 position=1,
-    #             )
-    #         with open(path, "wb") as f:
-    #             for chunk in r.iter_content(block_size):
-    #                 if progress:
-    #                     progress_bar.update(len(chunk))
-    #                 if chunk:
-    #                     f.write(chunk)
-    #         if progress:
-    #             progress_bar.close()
-    #     return path
-
-    # def prepare_large_upload(
-    #     self, filename, dataset_or_model_id, checksum, user, endpoint
-    # ):
-    #     response = requests.post(
-    #         self.url + f"{endpoint}/{dataset_or_model_id}/uploadId",
-    #         json={"filname": filename, "checksum": checksum},
-    #         headers=self.generate_headers(user),
-    #     )
-    #     if response.status_code != 200:
-    #         raise Exception(response.json()["detail"])
-    #     data = response.json()
-    #     upload_id, parts = (
-    #         data["upload_id"],
-    #         data["parts"] if "parts" in data else [],
-    #     )
-    #     return upload_id, parts
-
-    # def get_chunk_size(self, content_size):
-    #     # adapt chunk size to content size to avoid S3 limits (10000 parts, 500MB per part, 5TB per object)
-    #     chunk_size = 1024 * 1024 * 10  # 10 MB (up to 100 GB, 10000 parts)
-    #     if content_size >= 1024 * 1024 * 1024 * 100:  # 100 GB
-    #         chunk_size = 1024 * 1024 * 100  # 100 MB (up to 1 TB, 10000 parts)
-    #     elif content_size >= 1024 * 1024 * 1024 * 1000:  # 1 TB
-    #         chunk_size = 1024 * 1024 * 500  # 0.5 GB (up to 5 TB, 10000 parts)
-    #     return chunk_size
-
-    # def read_in_chunks(self, file_object, CHUNK_SIZE):
-    #     while True:
-    #         data = file_object.read(CHUNK_SIZE)
-    #         if not data:
-    #             break
-    #         yield data
-
-    # def ingest_large_file(
-    #     self, file_path, files_size, upload_id, user, parts, endpoint
-    # ):
-    #     print(endpoint)
-    #     # content_path = os.path.abspath(file)
-    #     # content_size = os.stat(content_path).st_size
-    #     chunk_size = self.get_chunk_size(files_size)
-    #     total_chunks = files_size // chunk_size
-    #     # upload chunks sequentially
-    #     pbar = tqdm(
-    #         self.read_in_chunks(open(file_path, "rb"), chunk_size),
-    #         total=total_chunks,
-    #     )
-    #     index = 0
-    #     for chunk in pbar:
-    #         part = index // chunk_size + 1
-    #         offset = index + len(chunk)
-    #         index = offset
-    #         if part not in parts:
-    #             checksum = hashlib.md5(chunk).hexdigest()
-    #             response = requests.post(
-    #                 f"{self.url}{endpoint}/chunk/{upload_id}",
-    #                 files={"file": chunk},
-    #                 data={"part_number": part, "checksum": checksum},
-    #                 headers=self.generate_headers(user),
-    #             )
-    #             if response.status_code != 200:
-    #                 raise Exception(response.json()["detail"])
-    #         pbar.set_description(
-    #             "{:.2f}/{:.2f} MB".format(
-    #                 offset / 1024 / 1024, files_size / 1024 / 1024
-    #             )
-    #         )
-    #     pbar.close()
-    #     return
-
-    # def complete_upload(self, user, upload_id, version, endpoint):
-    #     r = requests.post(
-    #         f"{self.url}{endpoint}/complete/{upload_id}?version={version}",
-    #         headers=self.generate_headers(user),
-    #     )
-    #     return self.format_response(r)
-
-    # def get_file_stream(self, dataset_id, filename, user, version=None):
-    #     url = self.url + f"datasets/{dataset_id}/download/{filename}"
-    #     if version is not None:
-    #         url += "?version=" + str(version)
-    #     headers = self.generate_headers(user)
-    #     response = requests.get(url, headers=headers, stream=True)
-    #     return BytesIO(response.content)
+        return data["presigned_url"]
eotdl/repos/ModelsAPIRepo.py CHANGED
@@ -1,9 +1,7 @@
 import requests
-import geopandas as gpd
 
 from ..repos import APIRepo
 
-
 class ModelsAPIRepo(APIRepo):
     def __init__(self, url=None):
         super().__init__(url)
@@ -20,61 +18,24 @@ class ModelsAPIRepo(APIRepo):
         response = requests.get(url)
         return self.format_response(response)
 
-    def create_model(self, metadata, user):
-        response = requests.post(
-            self.url + "models",
-            json=metadata,
-            headers=self.generate_headers(user),
-        )
-        return self.format_response(response)
-
     def retrieve_model(self, name):
         response = requests.get(self.url + "models?name=" + name)
         return self.format_response(response)
-
-    def create_version(self, model_id, user):
+
+    def create_model(self, metadata, user):
         response = requests.post(
-            self.url + "models/version/" + model_id,
-            headers=self.generate_headers(user),
-        )
-        return self.format_response(response)
-
-    def update_model(
-        self, model_id, authors, source, license, thumbnail, content, user
-    ):
-        response = requests.put(
-            self.url + f"models/{model_id}",
-            json={
-                "authors": authors,
-                "source": source,
-                "license": license,
-                "thumbnail": thumbnail,
-                "description": content,
-            },
+            self.url + "models",
+            json=metadata,
             headers=self.generate_headers(user),
         )
         return self.format_response(response)
 
-    def create_stac_model(self, name, user):
+    def complete_ingestion(self, model_id, version, size, user):
         response = requests.post(
-            self.url + "models/stac",
-            json={"name": name},
-            headers=self.generate_headers(user),
-        )
-        return self.format_response(response)
-
-    def ingest_stac(self, stac_json, model_id, user):
-        response = requests.put(
-            self.url + f"models/stac/{model_id}",
-            json={"stac": stac_json},
+            self.url + "models/complete/" + model_id,
+            json={"version": version, "size": size},
             headers=self.generate_headers(user),
         )
         return self.format_response(response)
 
-    def download_stac(self, model_id, user):
-        url = self.url + "models/" + model_id + "/download"
-        headers = self.generate_headers(user)
-        response = requests.get(url, headers=headers)
-        if response.status_code != 200:
-            return None, response.json()["detail"]
-        return gpd.GeoDataFrame.from_features(response.json()["features"]), None
+
eotdl/repos/STACAPIRepo.py CHANGED
@@ -1,5 +1,4 @@
 import requests
-import geopandas as gpd
 
 from ..repos import APIRepo
 
eotdl/tools/time_utils.py CHANGED
@@ -131,11 +131,11 @@ def get_day_between(
     Get the day between two dates
     """
     if isinstance(from_date, str):
-        from_date = datetime.strptime(from_date, "%Y-%m-%dT%H:%M:%SZ")
+        from_date = format_time_acquired(from_date)
     if isinstance(to_date, str):
-        to_date = datetime.strptime(to_date, "%Y-%m-%dT%H:%M:%SZ")
+        to_date = format_time_acquired(to_date)
 
-    date_between = from_date + timedelta(days=1)
+    date_between = from_date + (to_date - from_date) / 2
     date_between = date_between.strftime("%Y-%m-%d")
 
    return date_between
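
Note on the `get_day_between` hunk above: this is a behavior change, not a refactor. The old code always returned the day after `from_date`, while the new code returns the midpoint of the two dates (and accepts any string `format_time_acquired` can parse, not only `%Y-%m-%dT%H:%M:%SZ`). A minimal standalone sketch of the new midpoint logic, using `datetime.fromisoformat` as a stand-in for the package's own `format_time_acquired`:

    from datetime import datetime

    # Stand-in for eotdl's format_time_acquired: parse ISO-8601 strings.
    from_date = datetime.fromisoformat("2024-01-01T00:00:00")
    to_date = datetime.fromisoformat("2024-01-11T00:00:00")

    # New behavior: midpoint of the interval (timedelta supports division by 2).
    date_between = (from_date + (to_date - from_date) / 2).strftime("%Y-%m-%d")
    print(date_between)  # 2024-01-06; the old code would have returned 2024-01-02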
eotdl-2025.3.25.dist-info/METADATA → eotdl-2025.4.2.post2.dist-info/METADATA CHANGED
@@ -1,36 +1,18 @@
-Metadata-Version: 2.3
+Metadata-Version: 2.4
 Name: eotdl
-Version: 2025.3.25
+Version: 2025.4.2.post2
 Summary: Earth Observation Training Data Lab
-License: MIT
-Author: EarthPulse
-Author-email: it@earthpulse.es
-Requires-Python: >=3.8,<4.0
-Classifier: License :: OSI Approved :: MIT License
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.8
-Classifier: Programming Language :: Python :: 3.9
-Classifier: Programming Language :: Python :: 3.10
-Classifier: Programming Language :: Python :: 3.11
-Classifier: Programming Language :: Python :: 3.12
-Classifier: Programming Language :: Python :: 3.13
-Requires-Dist: black (>=23.10.1,<24.0.0)
-Requires-Dist: geomet (>=1.0.0,<2.0.0)
-Requires-Dist: geopandas (>=0.13.2,<0.14.0)
-Requires-Dist: markdown (>=3.5.2,<4.0.0)
-Requires-Dist: markdownify (>=0.11.6,<0.12.0)
-Requires-Dist: mypy (>=1.6.1,<2.0.0)
-Requires-Dist: openeo (>=0.31.0,<0.32.0)
-Requires-Dist: pydantic (>=1.10.6,<2.0.0)
-Requires-Dist: pyjwt (>=2.6.0,<3.0.0)
-Requires-Dist: pystac[validation] (==1.8.2)
-Requires-Dist: python-frontmatter (>=1.1.0,<2.0.0)
-Requires-Dist: pyyaml (>=6.0.1,<7.0.0)
-Requires-Dist: rasterio (>=1.3.9,<2.0.0)
-Requires-Dist: requests (>=2.28.2,<3.0.0)
-Requires-Dist: sentinelhub (>=3.9.1,<4.0.0)
-Requires-Dist: tqdm (>=4.65.0,<5.0.0)
-Requires-Dist: typer[all] (>=0.9.0,<0.10.0)
+Author-email: earthpulse <it@earthpulse.es>
+License-Expression: MIT
+Requires-Python: >=3.12
+Requires-Dist: geopandas>=1.0.1
+Requires-Dist: pydantic>=2.11.1
+Requires-Dist: pyjwt>=2.10.1
+Requires-Dist: pystac>=1.12.2
+Requires-Dist: python-frontmatter>=1.1.0
+Requires-Dist: stac-geoparquet>=0.6.0
+Requires-Dist: tqdm>=4.67.1
+Requires-Dist: typer>=0.15.2
 Description-Content-Type: text/markdown
 
 <p align="center">
@@ -63,4 +45,4 @@ One of the most limiting factors of AI for EO applications is the scarcity of su
 
 Generating TDS is time consuming and expensive. Data access is usually limited and costly, especially for Very High Resolution (VHR) images that allow objects like trees to be clearly identified. In some cases, domain experts or even in-person (in-situ) trips are required to manually confirm the objects in a satellite image are correctly annotated with a high degree of quality. This results in the field of AI for EO applications lagging when compared to other fields, impeding the development of new applications and limiting the full potential of AI in EO.
 
-The European Space Agency (ESA) Earth Observation Training Data Lab (EOTDL) will address key limitations and capability gaps for working with Machine Learning (ML) training data in EO by providing a set of open-source tools to create, share, and improve datasets as well as training ML algorithms in the cloud. EOTDL will also offer an online repository where datasets and models can be explored and accessed.
+The European Space Agency (ESA) Earth Observation Training Data Lab (EOTDL) will address key limitations and capability gaps for working with Machine Learning (ML) training data in EO by providing a set of open-source tools to create, share, and improve datasets as well as training ML algorithms in the cloud. EOTDL will also offer an online repository where datasets and models can be explored and accessed.
eotdl-2025.3.25.dist-info/RECORD → eotdl-2025.4.2.post2.dist-info/RECORD CHANGED
@@ -1,22 +1,22 @@
-eotdl/__init__.py,sha256=5OBv4eVG7bKLTl5AtMeoKzr4tllf7-R_Vp-xqXk87VQ,27
-eotdl/access/__init__.py,sha256=jbyjD7BRGJURlTNmtcbBBhw3Xk4EiZvkqmEykM-bJ1k,231
+eotdl/__init__.py,sha256=ba4I6YAt_kaAovhogjneM5HX0XaIwAqzAW0Ftak1-ns,29
+eotdl/cli.py,sha256=1wtNmiuqjwDB1Me-eSio--dnOabrbdpMiO9dQoIbOoc,702
+eotdl/access/__init__.py,sha256=k-zmTwB6VLoWt_AsXx9CnEKdtONBZAaC8T6vqPMPSjk,436
+eotdl/access/download.py,sha256=e5H8LUkCfIVkFxJFM5EwCMG-R5DHVSHDGLvuNM5DNc8,2815
+eotdl/access/search.py,sha256=1indipTfna4VAfGlKb8gkaYyHAELdHR4cm1mVIDW69s,1415
 eotdl/access/airbus/__init__.py,sha256=G_kkRS9eFjXbQ-aehmTLXeAxh7zpAxz_rgB7J_w0NRg,107
 eotdl/access/airbus/client.py,sha256=zjfgB_NTsCCIszoQesYkyLJgheKg-eTh28vbleXYxfw,12018
 eotdl/access/airbus/parameters.py,sha256=Z8XIrxG5wAOuOoH-fkdKfdNMEMLFp6PaxJN7v4MefMI,1009
 eotdl/access/airbus/utils.py,sha256=oh_N1Rn4fhcvUgNPpH2QzVvpe4bA0gqRgNguzRVqUps,652
-eotdl/access/download.py,sha256=DgemJKafNOlCUVW8OxpSP4br9ij5F1iSrSD-x0B5qFU,1845
-eotdl/access/search.py,sha256=JW4MnM3xbXxvsaNCFkRKxPhxhNKJgZAutE2wna6qUpo,631
-eotdl/access/sentinelhub/__init__.py,sha256=YpvaUBTRXM26WrXipo51ZUBCDv9WjRIdT8l1Pklpt_M,238
-eotdl/access/sentinelhub/client.py,sha256=g40avqlUpIa-WLjD7tK8CL8_SohBA2v3m8NZ0KbIFxc,4098
-eotdl/access/sentinelhub/evalscripts.py,sha256=m6cnZ6ryXHgdH2B7RDVSlDHXWfvKi7HMGkTHXEcJsTw,4142
-eotdl/access/sentinelhub/parameters.py,sha256=SEal7mCPkADc7lhQL-63t2h5-XCssYpGMvK5Eo3etFU,2078
-eotdl/access/sentinelhub/utils.py,sha256=X9Q1YvErBdMsRKszXyaOaG6ZMvPdM2Nl_0SH-dWSFo0,3560
+eotdl/access/sentinelhub/__init__.py,sha256=Y-W5e2goUBTLctY8RLvgQIXP_oafvj3HuEamRMiiRLQ,298
+eotdl/access/sentinelhub/client.py,sha256=PwejwGG8So2W-IxGSPkCYtB1VsPH7UThYXTeg2EUzs0,4135
+eotdl/access/sentinelhub/evalscripts.py,sha256=XU91t6htfr-dlOBfIk0MFZG-yXWZ4hvIWt_P4G3_97A,13778
+eotdl/access/sentinelhub/parameters.py,sha256=U07RDzo0PiIAHP9Q9h4QjS7r1FkG-ych8MFGjz8c3OA,4270
+eotdl/access/sentinelhub/utils.py,sha256=cewMRoNxIx8TYmlcl2Rpj_46f9-IVBVFncBHAra3D28,5114
 eotdl/auth/__init__.py,sha256=OuGNfJQ-8Kymn4zIywlHQfImEO8DJMJIwOwTQm-u_dc,99
 eotdl/auth/auth.py,sha256=EjbVFREA2H0sjFJhVqjFZrwjKPzxRJ2x83MTjizpRBs,2029
 eotdl/auth/errors.py,sha256=E1lv3Igk--J-SOgNH18i8Xx9bXrrMyBSHKt_CAUmGPo,308
 eotdl/auth/is_logged.py,sha256=QREuhkoDnarZoUZwCxVCNoESGb_Yukh0lJo1pXvrV9Q,115
 eotdl/auth/logout.py,sha256=P_Sp6WmVvnG3R9V1L9541KNyHFko9DtQPqAKD2vaguw,161
-eotdl/cli.py,sha256=1wtNmiuqjwDB1Me-eSio--dnOabrbdpMiO9dQoIbOoc,702
 eotdl/commands/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 eotdl/commands/auth.py,sha256=WzA0aFGRoscy7fPKQTxiphBc0LJztJxBBl7rjDBRVfI,1544
 eotdl/commands/datasets.py,sha256=rFdimg_AOp_sf8N1dPHOfwHoqiGBXPOY7bHhg3022v0,5208
@@ -25,28 +25,29 @@ eotdl/commands/stac.py,sha256=Nt7WDzANgcaxJYwr-5XOv887jLrYXlut5dHbh5rKNPU,1440
 eotdl/curation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 eotdl/curation/stac/__init__.py,sha256=FMi-qzd6v1sdsqZsBRRnedccaJFXJGCPCH3uTctyLYU,37
 eotdl/curation/stac/api.py,sha256=wAn_oDuvGJOjHOSauLIiXZ9ym8n4jyk-OTIGvCcRAzo,1424
-eotdl/curation/stac/stac.py,sha256=eap9aqdLepEdhkoY7PCD1VD-4KwaU7F4rLMeIo7c7F4,1011
-eotdl/datasets/__init__.py,sha256=IKEcRM6TFgEU5mnT-VrR90TGJgyDp1nby1qrdshy9wk,170
-eotdl/datasets/ingest.py,sha256=7hFkCZuN2fNbiJ0hX0bqVgx2k5vVtxwDqjmtquR7CFs,1008
+eotdl/curation/stac/stac.py,sha256=4f7xrh2CcXTkTs3or1UMVxiFfwtVfTqH4YwTGsbi6No,1013
+eotdl/datasets/__init__.py,sha256=z1jtOk68RRRYqSD55W9CWHhkknHvl8Sc92NvGRdQh2w,194
+eotdl/datasets/ingest.py,sha256=-qXOijYYzwEwhS9IJSynXTLEJS0z_rfJz3DLX14R1TQ,1253
 eotdl/datasets/retrieve.py,sha256=dhNbBJu0vE0l-LsGQNQF5Vc_WzZDRbXPzvd66GNlV6U,691
-eotdl/datasets/stage.py,sha256=7FYAG_k41WYyXilW8gEZ7f77DUkMkqAeHNAqDB6VrY8,2004
+eotdl/datasets/stage.py,sha256=pcU1AsjbczzMHdhCxjKfCuuuLo1OZMMWNAUqj-3SxKc,2162
 eotdl/datasets/update.py,sha256=x-rpfxnavn9X-7QYkFMGtbn1b3bKmAZydOeS7Tjr5AQ,386
 eotdl/files/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 eotdl/files/ingest.bck,sha256=dgjZfd-ACCKradDo2B02CPahwEhFtWvnKvTm372K5eo,6185
-eotdl/files/ingest.py,sha256=2wEtp6x4jToUkFXW6qREg-C1KzmxYDGGPKGQr8B3LRk,8915
+eotdl/files/ingest.py,sha256=9IHI4h7ULrvmEAqLjhZvIDVm6eIeVXbO-_EtC78o3Co,9287
 eotdl/files/metadata.py,sha256=C-NDr-zjM58fP8QcHB1N1QyLRUeYyMbT6wPPnxGk8LI,1370
 eotdl/models/__init__.py,sha256=5mriPl64bzRwcY4O4Bnjt2WeKpX4ab5yXZ3uKzapJfA,158
-eotdl/models/ingest.py,sha256=OnXpYHABJHHIGnu-qSunCb5Usg8pXZFhrHbCVZJx7lk,981
+eotdl/models/download.py,sha256=rRT3fG-qS3-SXfzFdqy0cuiDnOIV9Du74JCnsbbA9Ps,3475
+eotdl/models/ingest.py,sha256=Xf5u360SmtRsPTR6pMKCE_o8pxPVbhmvgJYdrZpXa8o,1219
 eotdl/models/retrieve.py,sha256=-Ij7dT4J1p7MW4n13OlPB9OW4tBaBXPwk9dW8IuCZPc,664
-eotdl/models/stage.py,sha256=nlLL5kYOMJPDPR3orhm5ZOZnZrqw0Q37nV6gWylBIbE,1805
+eotdl/models/stage.py,sha256=rvWN8vcBz7qHhu0TzJ90atw1kEr3JPKF0k2S-Sv-JVs,1944
 eotdl/models/update.py,sha256=4FWeD95cXvRpefRjw3Foqb30e30otxqWUZ6nQM9cbmM,374
 eotdl/repos/APIRepo.py,sha256=fcMpVbatfJgAq12bGWM828n8UDOixBbf5ueleB_Hrc4,791
 eotdl/repos/AuthAPIRepo.py,sha256=vYCqFawe3xUm2cx4SqVXCvzl8J_sr9rs_MkipYC0bXE,957
 eotdl/repos/AuthRepo.py,sha256=jpzzhINCcDZHRCyrPDsp49h17IlXp2HvX3BB3f5cnb4,1154
-eotdl/repos/DatasetsAPIRepo.py,sha256=Yy22IoiASPmca93r4Rt5lzq28TFQkq3aOl_M4u8VJw8,3236
-eotdl/repos/FilesAPIRepo.py,sha256=le8Xzt1zgB3MmLYoCN46zxcFLQvJXJoOeXQULj1DkCI,11079
-eotdl/repos/ModelsAPIRepo.py,sha256=79euf5WsfUxG5KSIGhKT8T7kSl-NtISwxvqHnck-bq0,2616
-eotdl/repos/STACAPIRepo.py,sha256=YtLd-Wl2mOM4MtT7nCFHd26oeNleq9POKajJuhEt-74,1407
+eotdl/repos/DatasetsAPIRepo.py,sha256=_7n2jzjT01tP3fl51AC28FCv1iuIy-CvcNbJdh0wKg8,1434
+eotdl/repos/FilesAPIRepo.py,sha256=3nvxre9TmfWoDjjmMBhrNfssiRZXfIdORK2aeEwoGIk,3765
+eotdl/repos/ModelsAPIRepo.py,sha256=4CI5chA1D3ewP8b1BBbJwugiKHJwWugEI2F9WuyhlRU,1250
+eotdl/repos/STACAPIRepo.py,sha256=bvc2oQp967jX_kG9fKYmTc496xPdL0cSGfa0mCQMKNI,1383
 eotdl/repos/__init__.py,sha256=GIzk62681dvNzYgVzvJgrMzVRhrep4-kJH6lTOtfnT8,258
 eotdl/shared/__init__.py,sha256=mF7doJC8Z5eTPmB01UQvPivThZac32DRY33T6qshXfg,41
 eotdl/shared/checksum.py,sha256=4IB6N9jRO0chMDNJzpdnFDhC9wcFF9bO5oHq2HodcHw,479
@@ -55,11 +56,11 @@ eotdl/tools/geo_utils.py,sha256=JKHUAnqkwiIrvh5voDclWAW-i57qVqH2FUjeOt1TQf4,7547
 eotdl/tools/metadata.py,sha256=RvNmoMdfEKoo-DzhEAqL-f9ZCjIe_bsdHQwACMk6w1E,1664
 eotdl/tools/paths.py,sha256=yWhOtVxX4NxrDrrBX2fuye5N1mAqrxXFy_eA7dffd84,1152
 eotdl/tools/stac.py,sha256=ovXdrPm4Sn9AAJmrP88WnxDmq2Ut-xPoscjphxz3Iyo,5763
-eotdl/tools/time_utils.py,sha256=qJ3-rk1I7ne722SLfAP6-59kahQ0vLQqIf9VpOi0Kpg,4691
+eotdl/tools/time_utils.py,sha256=JHrQ3PxXkhwor8zcOFccf26zOG9WBtb9xHb6j-Fqa9k,4661
 eotdl/tools/tools.py,sha256=Tl4_v2ejkQo_zyZek8oofJwoYcdVosdOwW1C0lvWaNM,6354
 eotdl/wrappers/__init__.py,sha256=IY3DK_5LMbc5bIQFleQA9kzFbPhWuTLesJ8dwfvpkdA,32
 eotdl/wrappers/models.py,sha256=kNO4pYw9KKKmElE7bZWWHGs7FIThNUXj8XciKh_3rNw,6432
-eotdl-2025.3.25.dist-info/METADATA,sha256=zGb2kpfJo_dacJpUd4_GGaznMSJ3QamvXlbbqGP-Iak,4189
-eotdl-2025.3.25.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
-eotdl-2025.3.25.dist-info/entry_points.txt,sha256=s6sfxUfRrSX2IP2UbrzTFTvRCtLgw3_OKcHlOKf_5F8,39
-eotdl-2025.3.25.dist-info/RECORD,,
+eotdl-2025.4.2.post2.dist-info/METADATA,sha256=6JK2dcBCiFpFv2u_X899qMEqrkt55lmtNTvqulvppuo,3338
+eotdl-2025.4.2.post2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+eotdl-2025.4.2.post2.dist-info/entry_points.txt,sha256=FV4dFIZ5zdWj1q1nUEEip29n3sAgbviVOizEz00gEF0,40
+eotdl-2025.4.2.post2.dist-info/RECORD,,
eotdl-2025.3.25.dist-info/WHEEL → eotdl-2025.4.2.post2.dist-info/WHEEL CHANGED
@@ -1,4 +1,4 @@
 Wheel-Version: 1.0
-Generator: poetry-core 2.1.1
+Generator: hatchling 1.27.0
 Root-Is-Purelib: true
 Tag: py3-none-any
eotdl-2025.4.2.post2.dist-info/entry_points.txt ADDED
@@ -0,0 +1,2 @@
+[console_scripts]
+eotdl = eotdl.cli:app
eotdl-2025.3.25.dist-info/entry_points.txt DELETED
@@ -1,3 +0,0 @@
-[console_scripts]
-eotdl=eotdl.cli:app
-