eotdl-2025.4.2-py3-none-any.whl → eotdl-2025.4.2.post2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
eotdl/__init__.py CHANGED
@@ -1 +1 @@
- __version__ = "2025.04.02"
+ __version__ = "2025.04.02-2"
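The two version spellings differ only on the surface: under PEP 440, leading zeros in release segments are dropped and a trailing `-N` is read as `.postN`, so `2025.04.02-2` normalizes to the wheel's `2025.4.2.post2`. A quick check with the `packaging` library (an assumption here, not part of this diff):

```python
# Hedged check: both version spellings should normalize to the same PEP 440 version.
from packaging.version import Version

v = Version("2025.04.02-2")            # the new __version__ string
print(v)                               # expected: 2025.4.2.post2
print(v == Version("2025.4.2.post2"))  # expected: True
```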
eotdl/repos/DatasetsAPIRepo.py CHANGED
@@ -1,9 +1,7 @@
  import requests
- import geopandas as gpd
 
  from ..repos import APIRepo
 
-
  class DatasetsAPIRepo(APIRepo):
      def __init__(self, url=None):
          super().__init__(url)
@@ -43,51 +41,3 @@ class DatasetsAPIRepo(APIRepo):
              headers=self.generate_headers(user),
          )
          return self.format_response(response)
-
-
-     # def create_version(self, dataset_id, user):
-     #     response = requests.post(
-     #         self.url + "datasets/version/" + dataset_id,
-     #         headers=self.generate_headers(user),
-     #     )
-     #     return self.format_response(response)
-
-     # def create_stac_dataset(self, name, user):
-     #     response = requests.post(
-     #         self.url + "datasets/stac",
-     #         json={"name": name},
-     #         headers=self.generate_headers(user),
-     #     )
-     #     return self.format_response(response)
-
-     # def ingest_stac(self, stac_json, dataset_id, user):
-     #     response = requests.put(
-     #         self.url + f"datasets/stac/{dataset_id}",
-     #         json={"stac": stac_json},
-     #         headers=self.generate_headers(user),
-     #     )
-     #     return self.format_response(response)
-
-     # def download_stac(self, dataset_id, user):
-     #     url = self.url + "datasets/" + dataset_id + "/download"
-     #     headers = self.generate_headers(user)
-     #     response = requests.get(url, headers=headers)
-     #     if response.status_code != 200:
-     #         return None, response.json()["detail"]
-     #     return gpd.GeoDataFrame.from_features(response.json()["features"]), None
-
-     # def update_dataset(
-     #     self, dataset_id, authors, source, license, thumbnail, content, user
-     # ):
-     #     response = requests.put(
-     #         self.url + f"datasets/{dataset_id}",
-     #         json={
-     #             "authors": authors,
-     #             "source": source,
-     #             "license": license,
-     #             "thumbnail": thumbnail,
-     #             "description": content,
-     #         },
-     #         headers=self.generate_headers(user),
-     #     )
-     #     return self.format_response(response)
eotdl/repos/FilesAPIRepo.py CHANGED
@@ -1,8 +1,5 @@
  import requests
  import os
- from tqdm import tqdm
- import hashlib
- from io import BytesIO
 
  from ..repos import APIRepo
 
@@ -103,191 +100,4 @@ class FilesAPIRepo(APIRepo):
          if error:
              # print("ERROR generate_presigned_url", error)
              return None
-         return data["presigned_url"]
-
-     # can we download large files?
-
-     # with requests.get(presigned_url, headers=headers, stream=True) as r:
-     #     r.raise_for_status()
-     #     total_size = int(r.headers.get("content-length", 0))
-     #     block_size = 1024 * 1024 * 10
-     #     progress = progress and total_size > 1024 * 1024 * 16
-     #     if progress:
-     #         progress_bar = tqdm(
-     #             total=total_size,
-     #             unit="iB",
-     #             unit_scale=True,
-     #             unit_divisor=1024,
-     #             position=1,
-     #         )
-     #     with open(path, "wb") as f:
-     #         for chunk in r.iter_content(block_size):
-     #             if progress:
-     #                 progress_bar.update(len(chunk))
-     #             if chunk:
-     #                 f.write(chunk)
-     #     if progress:
-     #         progress_bar.close()
-     #     return path
-
-
-
-     # def ingest_files_batch(
-     #     self,
-     #     batch,  # ziped batch of files
-     #     checksums,
-     #     dataset_or_model_id,
-     #     user,
-     #     endpoint,
-     #     version=None,
-     # ):
-     #     url = self.url + f"{endpoint}/{dataset_or_model_id}/batch"
-     #     if version is not None:
-     #         url += "?version=" + str(version)
-     #     reponse = requests.post(
-     #         url,
-     #         files={"batch": ("batch.zip", batch)},
-     #         data={"checksums": checksums},
-     #         headers=self.generate_headers(user),
-     #     )
-     #     return self.format_response(reponse)
-
-     # def add_files_batch_to_version(
-     #     self,
-     #     batch,
-     #     dataset_or_model_id,
-     #     version,
-     #     user,
-     #     endpoint,
-     # ):
-     #     reponse = requests.post(
-     #         self.url + f"{endpoint}/{dataset_or_model_id}/files?version={str(version)}",
-     #         data={
-     #             "filenames": [f["path"] for f in batch],
-     #             "checksums": [f["checksum"] for f in batch],
-     #         },
-     #         headers=self.generate_headers(user),
-     #     )
-     #     return self.format_response(reponse)
-
-     # def retrieve_files(self, dataset_or_model_id, endpoint, version=None):
-     #     url = f"{self.url}{endpoint}/{dataset_or_model_id}/files"
-     #     if version is not None:
-     #         url += "?version=" + str(version)
-     #     response = requests.get(url)
-     #     return self.format_response(response)
-
-
-
-     # def download_file_url(self, url, filename, path, user, progress=False):
-     #     headers = self.generate_headers(user)
-     #     path = f"{path}/{filename}"
-     #     for i in range(1, len(path.split("/")) - 1):
-     #         # print("/".join(path.split("/")[: i + 1]))
-     #         os.makedirs("/".join(path.split("/")[: i + 1]), exist_ok=True)
-     #     with requests.get(url, headers=headers, stream=True) as r:
-     #         r.raise_for_status()
-     #         total_size = int(r.headers.get("content-length", 0))
-     #         block_size = 1024 * 1024 * 10
-     #         progress = progress and total_size > 1024 * 1024 * 16
-     #         if progress:
-     #             progress_bar = tqdm(
-     #                 total=total_size,
-     #                 unit="iB",
-     #                 unit_scale=True,
-     #                 unit_divisor=1024,
-     #                 position=1,
-     #             )
-     #         with open(path, "wb") as f:
-     #             for chunk in r.iter_content(block_size):
-     #                 if progress:
-     #                     progress_bar.update(len(chunk))
-     #                 if chunk:
-     #                     f.write(chunk)
-     #         if progress:
-     #             progress_bar.close()
-     #     return path
-
-     # def prepare_large_upload(
-     #     self, filename, dataset_or_model_id, checksum, user, endpoint
-     # ):
-     #     response = requests.post(
-     #         self.url + f"{endpoint}/{dataset_or_model_id}/uploadId",
-     #         json={"filname": filename, "checksum": checksum},
-     #         headers=self.generate_headers(user),
-     #     )
-     #     if response.status_code != 200:
-     #         raise Exception(response.json()["detail"])
-     #     data = response.json()
-     #     upload_id, parts = (
-     #         data["upload_id"],
-     #         data["parts"] if "parts" in data else [],
-     #     )
-     #     return upload_id, parts
-
-     # def get_chunk_size(self, content_size):
-     #     # adapt chunk size to content size to avoid S3 limits (10000 parts, 500MB per part, 5TB per object)
-     #     chunk_size = 1024 * 1024 * 10  # 10 MB (up to 100 GB, 10000 parts)
-     #     if content_size >= 1024 * 1024 * 1024 * 100:  # 100 GB
-     #         chunk_size = 1024 * 1024 * 100  # 100 MB (up to 1 TB, 10000 parts)
-     #     elif content_size >= 1024 * 1024 * 1024 * 1000:  # 1 TB
-     #         chunk_size = 1024 * 1024 * 500  # 0.5 GB (up to 5 TB, 10000 parts)
-     #     return chunk_size
-
-     # def read_in_chunks(self, file_object, CHUNK_SIZE):
-     #     while True:
-     #         data = file_object.read(CHUNK_SIZE)
-     #         if not data:
-     #             break
-     #         yield data
-
-     # def ingest_large_file(
-     #     self, file_path, files_size, upload_id, user, parts, endpoint
-     # ):
-     #     print(endpoint)
-     #     # content_path = os.path.abspath(file)
-     #     # content_size = os.stat(content_path).st_size
-     #     chunk_size = self.get_chunk_size(files_size)
-     #     total_chunks = files_size // chunk_size
-     #     # upload chunks sequentially
-     #     pbar = tqdm(
-     #         self.read_in_chunks(open(file_path, "rb"), chunk_size),
-     #         total=total_chunks,
-     #     )
-     #     index = 0
-     #     for chunk in pbar:
-     #         part = index // chunk_size + 1
-     #         offset = index + len(chunk)
-     #         index = offset
-     #         if part not in parts:
-     #             checksum = hashlib.md5(chunk).hexdigest()
-     #             response = requests.post(
-     #                 f"{self.url}{endpoint}/chunk/{upload_id}",
-     #                 files={"file": chunk},
-     #                 data={"part_number": part, "checksum": checksum},
-     #                 headers=self.generate_headers(user),
-     #             )
-     #             if response.status_code != 200:
-     #                 raise Exception(response.json()["detail"])
-     #         pbar.set_description(
-     #             "{:.2f}/{:.2f} MB".format(
-     #                 offset / 1024 / 1024, files_size / 1024 / 1024
-     #             )
-     #         )
-     #     pbar.close()
-     #     return
-
-     # def complete_upload(self, user, upload_id, version, endpoint):
-     #     r = requests.post(
-     #         f"{self.url}{endpoint}/complete/{upload_id}?version={version}",
-     #         headers=self.generate_headers(user),
-     #     )
-     #     return self.format_response(r)
-
-     # def get_file_stream(self, dataset_id, filename, user, version=None):
-     #     url = self.url + f"datasets/{dataset_id}/download/{filename}"
-     #     if version is not None:
-     #         url += "?version=" + str(version)
-     #     headers = self.generate_headers(user)
-     #     response = requests.get(url, headers=headers, stream=True)
-     #     return BytesIO(response.content)
+         return data["presigned_url"]
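The bulk of this hunk deletes long-commented-out upload/download machinery (chunked streaming, multipart uploads, tqdm progress bars), leaving `generate_presigned_url` to simply return the URL. For the gist of what the removed streaming block did, here is a minimal standalone sketch built from the deleted lines; the function name and defaults are illustrative, not eotdl's API:

```python
# Illustrative sketch of the removed streaming-download logic: fetch a presigned
# URL in 10 MB chunks, showing a tqdm progress bar for files larger than 16 MB.
import requests
from tqdm import tqdm

def download_presigned(presigned_url, path, progress=False):
    block_size = 1024 * 1024 * 10  # 10 MB chunks, as in the removed code
    with requests.get(presigned_url, stream=True) as r:
        r.raise_for_status()
        total_size = int(r.headers.get("content-length", 0))
        show = progress and total_size > 1024 * 1024 * 16
        bar = tqdm(total=total_size, unit="iB", unit_scale=True, unit_divisor=1024) if show else None
        with open(path, "wb") as f:
            for chunk in r.iter_content(block_size):
                if chunk:
                    f.write(chunk)
                    if bar:
                        bar.update(len(chunk))
        if bar:
            bar.close()
    return path
```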
eotdl/repos/ModelsAPIRepo.py CHANGED
@@ -1,9 +1,7 @@
  import requests
- import geopandas as gpd
 
  from ..repos import APIRepo
 
-
  class ModelsAPIRepo(APIRepo):
      def __init__(self, url=None):
          super().__init__(url)
@@ -40,49 +38,4 @@ class ModelsAPIRepo(APIRepo):
          )
          return self.format_response(response)
 
-     # def create_version(self, model_id, user):
-     #     response = requests.post(
-     #         self.url + "models/version/" + model_id,
-     #         headers=self.generate_headers(user),
-     #     )
-     #     return self.format_response(response)
-
-     # def update_model(
-     #     self, model_id, authors, source, license, thumbnail, content, user
-     # ):
-     #     response = requests.put(
-     #         self.url + f"models/{model_id}",
-     #         json={
-     #             "authors": authors,
-     #             "source": source,
-     #             "license": license,
-     #             "thumbnail": thumbnail,
-     #             "description": content,
-     #         },
-     #         headers=self.generate_headers(user),
-     #     )
-     #     return self.format_response(response)
-
-     # def create_stac_model(self, name, user):
-     #     response = requests.post(
-     #         self.url + "models/stac",
-     #         json={"name": name},
-     #         headers=self.generate_headers(user),
-     #     )
-     #     return self.format_response(response)
-
-     # def ingest_stac(self, stac_json, model_id, user):
-     #     response = requests.put(
-     #         self.url + f"models/stac/{model_id}",
-     #         json={"stac": stac_json},
-     #         headers=self.generate_headers(user),
-     #     )
-     #     return self.format_response(response)
-
-     # def download_stac(self, model_id, user):
-     #     url = self.url + "models/" + model_id + "/download"
-     #     headers = self.generate_headers(user)
-     #     response = requests.get(url, headers=headers)
-     #     if response.status_code != 200:
-     #         return None, response.json()["detail"]
-     #     return gpd.GeoDataFrame.from_features(response.json()["features"]), None
+
eotdl/repos/STACAPIRepo.py CHANGED
@@ -1,5 +1,4 @@
  import requests
- import geopandas as gpd
 
  from ..repos import APIRepo
 
eotdl-2025.4.2.dist-info/METADATA → eotdl-2025.4.2.post2.dist-info/METADATA RENAMED
@@ -1,23 +1,18 @@
  Metadata-Version: 2.4
  Name: eotdl
- Version: 2025.4.2
+ Version: 2025.4.2.post2
  Summary: Earth Observation Training Data Lab
  Author-email: earthpulse <it@earthpulse.es>
  License-Expression: MIT
- Requires-Python: >=3.8
- Requires-Dist: geomet>=1.1.0
- Requires-Dist: geopandas>=0.13.2
- Requires-Dist: markdown>=3.7
- Requires-Dist: pydantic>=2.10.6
- Requires-Dist: pyjwt>=2.9.0
- Requires-Dist: pystac>=1.8.4
+ Requires-Python: >=3.12
+ Requires-Dist: geopandas>=1.0.1
+ Requires-Dist: pydantic>=2.11.1
+ Requires-Dist: pyjwt>=2.10.1
+ Requires-Dist: pystac>=1.12.2
  Requires-Dist: python-frontmatter>=1.1.0
- Requires-Dist: pyyaml>=6.0.2
- Requires-Dist: rasterio>=1.3.11
- Requires-Dist: requests>=2.32.3
- Requires-Dist: sentinelhub>=3.11.1
+ Requires-Dist: stac-geoparquet>=0.6.0
  Requires-Dist: tqdm>=4.67.1
- Requires-Dist: typer>=0.15.1
+ Requires-Dist: typer>=0.15.2
  Description-Content-Type: text/markdown
 
  <p align="center">
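The post release tightens `Requires-Python` from `>=3.8` to `>=3.12` and trims the dependency set: geomet, markdown, pyyaml, rasterio, requests, and sentinelhub drop out, stac-geoparquet comes in, and the survivors get newer floors. (requests is still imported by the repos modules above, so it presumably now arrives transitively.) A hedged pre-install check against the new interpreter pin, again assuming the `packaging` library is available:

```python
# Hedged sketch: verify the running interpreter satisfies the tightened
# Requires-Python pin (>=3.12) before installing eotdl 2025.4.2.post2.
import sys
from packaging.specifiers import SpecifierSet

requires_python = SpecifierSet(">=3.12")  # from the updated METADATA
here = ".".join(map(str, sys.version_info[:3]))
print(f"Python {here}:", "ok" if here in requires_python else "does not satisfy >=3.12")
```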
eotdl-2025.4.2.dist-info/RECORD → eotdl-2025.4.2.post2.dist-info/RECORD RENAMED
@@ -1,4 +1,4 @@
- eotdl/__init__.py,sha256=Q321tl9ms35mPhMUUEAsd6HfOFIZ3i94xC69CF4wloU,27
+ eotdl/__init__.py,sha256=ba4I6YAt_kaAovhogjneM5HX0XaIwAqzAW0Ftak1-ns,29
  eotdl/cli.py,sha256=1wtNmiuqjwDB1Me-eSio--dnOabrbdpMiO9dQoIbOoc,702
  eotdl/access/__init__.py,sha256=k-zmTwB6VLoWt_AsXx9CnEKdtONBZAaC8T6vqPMPSjk,436
  eotdl/access/download.py,sha256=e5H8LUkCfIVkFxJFM5EwCMG-R5DHVSHDGLvuNM5DNc8,2815
@@ -44,10 +44,10 @@ eotdl/models/update.py,sha256=4FWeD95cXvRpefRjw3Foqb30e30otxqWUZ6nQM9cbmM,374
  eotdl/repos/APIRepo.py,sha256=fcMpVbatfJgAq12bGWM828n8UDOixBbf5ueleB_Hrc4,791
  eotdl/repos/AuthAPIRepo.py,sha256=vYCqFawe3xUm2cx4SqVXCvzl8J_sr9rs_MkipYC0bXE,957
  eotdl/repos/AuthRepo.py,sha256=jpzzhINCcDZHRCyrPDsp49h17IlXp2HvX3BB3f5cnb4,1154
- eotdl/repos/DatasetsAPIRepo.py,sha256=Yy22IoiASPmca93r4Rt5lzq28TFQkq3aOl_M4u8VJw8,3236
- eotdl/repos/FilesAPIRepo.py,sha256=poGis1oRhv4k8UnBy6eNLwxA9XPaVbG2yFuWycL-uhw,11081
- eotdl/repos/ModelsAPIRepo.py,sha256=VWtxQsMMPi8DxXyyO2tHJyYhnK3Y_k-m5YWLyEXWHrI,3016
- eotdl/repos/STACAPIRepo.py,sha256=YtLd-Wl2mOM4MtT7nCFHd26oeNleq9POKajJuhEt-74,1407
+ eotdl/repos/DatasetsAPIRepo.py,sha256=_7n2jzjT01tP3fl51AC28FCv1iuIy-CvcNbJdh0wKg8,1434
+ eotdl/repos/FilesAPIRepo.py,sha256=3nvxre9TmfWoDjjmMBhrNfssiRZXfIdORK2aeEwoGIk,3765
+ eotdl/repos/ModelsAPIRepo.py,sha256=4CI5chA1D3ewP8b1BBbJwugiKHJwWugEI2F9WuyhlRU,1250
+ eotdl/repos/STACAPIRepo.py,sha256=bvc2oQp967jX_kG9fKYmTc496xPdL0cSGfa0mCQMKNI,1383
  eotdl/repos/__init__.py,sha256=GIzk62681dvNzYgVzvJgrMzVRhrep4-kJH6lTOtfnT8,258
  eotdl/shared/__init__.py,sha256=mF7doJC8Z5eTPmB01UQvPivThZac32DRY33T6qshXfg,41
  eotdl/shared/checksum.py,sha256=4IB6N9jRO0chMDNJzpdnFDhC9wcFF9bO5oHq2HodcHw,479
@@ -60,7 +60,7 @@ eotdl/tools/time_utils.py,sha256=JHrQ3PxXkhwor8zcOFccf26zOG9WBtb9xHb6j-Fqa9k,466
  eotdl/tools/tools.py,sha256=Tl4_v2ejkQo_zyZek8oofJwoYcdVosdOwW1C0lvWaNM,6354
  eotdl/wrappers/__init__.py,sha256=IY3DK_5LMbc5bIQFleQA9kzFbPhWuTLesJ8dwfvpkdA,32
  eotdl/wrappers/models.py,sha256=kNO4pYw9KKKmElE7bZWWHGs7FIThNUXj8XciKh_3rNw,6432
- eotdl-2025.4.2.dist-info/METADATA,sha256=N3CIcPuh2pwfFaR8al9XF7iNyvQ8ctA60w9iPVZmw-U,3478
- eotdl-2025.4.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- eotdl-2025.4.2.dist-info/entry_points.txt,sha256=FV4dFIZ5zdWj1q1nUEEip29n3sAgbviVOizEz00gEF0,40
- eotdl-2025.4.2.dist-info/RECORD,,
+ eotdl-2025.4.2.post2.dist-info/METADATA,sha256=6JK2dcBCiFpFv2u_X899qMEqrkt55lmtNTvqulvppuo,3338
+ eotdl-2025.4.2.post2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ eotdl-2025.4.2.post2.dist-info/entry_points.txt,sha256=FV4dFIZ5zdWj1q1nUEEip29n3sAgbviVOizEz00gEF0,40
+ eotdl-2025.4.2.post2.dist-info/RECORD,,
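Each RECORD line is a `path,sha256=<digest>,<size>` triple; per the wheel spec the digest is the urlsafe-base64-encoded SHA-256 of the file with `=` padding stripped. The new `__init__.py` entry's size field (29 bytes) matches the one-line version string exactly, so the hash can be recomputed from this diff alone; a sketch (expected output taken from the RECORD above):

```python
# Sketch: recompute a wheel RECORD hash entry (urlsafe base64 of the SHA-256
# digest, '=' padding stripped).
import base64
import hashlib

content = b'__version__ = "2025.04.02-2"\n'  # 29 bytes, matching the RECORD size field
digest = base64.urlsafe_b64encode(hashlib.sha256(content).digest()).rstrip(b"=")
print("sha256=" + digest.decode())
# expected, per the RECORD entry above: sha256=ba4I6YAt_kaAovhogjneM5HX0XaIwAqzAW0Ftak1-ns
```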