dragoneye-python 0.2.0__tar.gz → 0.4.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27)
  1. dragoneye_python-0.4.0/PKG-INFO +10 -0
  2. dragoneye_python-0.4.0/dragoneye_python.egg-info/PKG-INFO +10 -0
  3. {dragoneye_python-0.2.0 → dragoneye_python-0.4.0}/dragoneye_python.egg-info/SOURCES.txt +4 -2
  4. dragoneye_python-0.4.0/dragoneye_python.egg-info/requires.txt +5 -0
  5. {dragoneye_python-0.2.0 → dragoneye_python-0.4.0}/pyproject.toml +6 -1
  6. dragoneye_python-0.4.0/requirements.txt +5 -0
  7. {dragoneye_python-0.2.0 → dragoneye_python-0.4.0}/src/dragoneye/__init__.py +5 -2
  8. dragoneye_python-0.4.0/src/dragoneye/classification.py +367 -0
  9. dragoneye_python-0.4.0/src/dragoneye/client.py +29 -0
  10. dragoneye_python-0.4.0/src/dragoneye/constants.py +2 -0
  11. dragoneye_python-0.4.0/src/dragoneye/models.py +49 -0
  12. {dragoneye_python-0.2.0 → dragoneye_python-0.4.0}/src/dragoneye/types/common.py +6 -1
  13. dragoneye_python-0.4.0/src/dragoneye/types/exception.py +18 -0
  14. dragoneye_python-0.4.0/src/dragoneye/types/media.py +186 -0
  15. dragoneye_python-0.2.0/LICENSE +0 -21
  16. dragoneye_python-0.2.0/PKG-INFO +0 -7
  17. dragoneye_python-0.2.0/dragoneye_python.egg-info/PKG-INFO +0 -7
  18. dragoneye_python-0.2.0/dragoneye_python.egg-info/requires.txt +0 -2
  19. dragoneye_python-0.2.0/requirements.txt +0 -2
  20. dragoneye_python-0.2.0/src/dragoneye/classification.py +0 -117
  21. dragoneye_python-0.2.0/src/dragoneye/client.py +0 -18
  22. dragoneye_python-0.2.0/src/dragoneye/types/image.py +0 -12
  23. {dragoneye_python-0.2.0 → dragoneye_python-0.4.0}/README.md +0 -0
  24. {dragoneye_python-0.2.0 → dragoneye_python-0.4.0}/dragoneye_python.egg-info/dependency_links.txt +0 -0
  25. {dragoneye_python-0.2.0 → dragoneye_python-0.4.0}/dragoneye_python.egg-info/top_level.txt +0 -0
  26. {dragoneye_python-0.2.0 → dragoneye_python-0.4.0}/setup.cfg +0 -0
  27. {dragoneye_python-0.2.0 → dragoneye_python-0.4.0}/src/dragoneye/types/__init__.py +0 -0
@@ -0,0 +1,10 @@
1
+ Metadata-Version: 2.4
2
+ Name: dragoneye-python
3
+ Version: 0.4.0
4
+ License-Expression: MIT
5
+ Requires-Python: >=3.8
6
+ Requires-Dist: requests
7
+ Requires-Dist: pydantic>=2
8
+ Requires-Dist: typing-extensions>=4.0.0
9
+ Requires-Dist: backoff>=2.0.0
10
+ Requires-Dist: aiohttp
@@ -0,0 +1,10 @@
1
+ Metadata-Version: 2.4
2
+ Name: dragoneye-python
3
+ Version: 0.4.0
4
+ License-Expression: MIT
5
+ Requires-Python: >=3.8
6
+ Requires-Dist: requests
7
+ Requires-Dist: pydantic>=2
8
+ Requires-Dist: typing-extensions>=4.0.0
9
+ Requires-Dist: backoff>=2.0.0
10
+ Requires-Dist: aiohttp
@@ -1,4 +1,3 @@
1
- LICENSE
2
1
  README.md
3
2
  pyproject.toml
4
3
  requirements.txt
@@ -10,6 +9,9 @@ dragoneye_python.egg-info/top_level.txt
10
9
  src/dragoneye/__init__.py
11
10
  src/dragoneye/classification.py
12
11
  src/dragoneye/client.py
12
+ src/dragoneye/constants.py
13
+ src/dragoneye/models.py
13
14
  src/dragoneye/types/__init__.py
14
15
  src/dragoneye/types/common.py
15
- src/dragoneye/types/image.py
16
+ src/dragoneye/types/exception.py
17
+ src/dragoneye/types/media.py
@@ -0,0 +1,5 @@
1
+ requests
2
+ pydantic>=2
3
+ typing-extensions>=4.0.0
4
+ backoff>=2.0.0
5
+ aiohttp
@@ -1,8 +1,13 @@
1
+ [build-system]
2
+ requires = ["setuptools>=77.0.3", "wheel"]
3
+ build-backend = "setuptools.build_meta"
4
+
1
5
  [project]
2
6
  name = "dragoneye-python"
3
- version = "0.2.0"
7
+ version = "0.4.0"
4
8
  requires-python = ">=3.8"
5
9
  dynamic = ["dependencies"]
10
+ license = "MIT"
6
11
 
7
12
  [tool.setuptools.dynamic]
8
13
  dependencies = {file = ["requirements.txt"]}
@@ -0,0 +1,5 @@
1
+ requests
2
+ pydantic>=2
3
+ typing-extensions>=4.0.0
4
+ backoff>=2.0.0
5
+ aiohttp
@@ -1,12 +1,14 @@
1
1
  from .classification import (
2
2
  Classification,
3
+ )
4
+ from .client import Dragoneye
5
+ from .models import (
3
6
  ClassificationObjectPrediction,
4
7
  ClassificationPredictImageResponse,
5
8
  ClassificationTraitRootPrediction,
6
9
  )
7
- from .client import Dragoneye
8
10
  from .types.common import NormalizedBbox, TaxonID, TaxonPrediction, TaxonType
9
- from .types.image import Image
11
+ from .types.media import Image, Video
10
12
 
11
13
  __all__ = [
12
14
  "Classification",
@@ -15,6 +17,7 @@ __all__ = [
15
17
  "ClassificationTraitRootPrediction",
16
18
  "Dragoneye",
17
19
  "Image",
20
+ "Video",
18
21
  "NormalizedBbox",
19
22
  "TaxonID",
20
23
  "TaxonPrediction",
@@ -0,0 +1,367 @@
1
+ import asyncio
2
+ import logging
3
+ import time
4
+ from typing import TYPE_CHECKING, Any, Dict, List, Literal, Optional, Union, overload
5
+
6
+ import aiohttp
7
+ import backoff
8
+ from aiohttp import ClientError
9
+ from pydantic import BaseModel
10
+
11
+ from .constants import FAILED_STATUS_PREFIX, PREDICTED_STATUS
12
+ from .models import (
13
+ ClassificationPredictImageResponse,
14
+ ClassificationPredictVideoResponse,
15
+ PredictionTaskStatusResponse,
16
+ )
17
+ from .types.common import (
18
+ BASE_API_URL,
19
+ PredictionTaskState,
20
+ PredictionTaskUUID,
21
+ PredictionType,
22
+ )
23
+ from .types.exception import (
24
+ PredictionTaskBeginError,
25
+ PredictionTaskError,
26
+ PredictionTaskResultsUnavailableError,
27
+ PredictionTimeoutException,
28
+ PredictionUploadError,
29
+ )
30
+ from .types.media import Image, Media, Video
31
+
32
+ if TYPE_CHECKING:
33
+ from .client import Dragoneye
34
+
35
+
36
+ class _PresignedPostRequest(BaseModel):
37
+ url: str
38
+ fields: Dict[str, Any]
39
+
40
+
41
+ class _MediaUploadUrl(BaseModel):
42
+ blob_path: str
43
+ presigned_post_request: _PresignedPostRequest
44
+
45
+
46
+ class _PredictionTaskBeginResponse(BaseModel):
47
+ prediction_task_uuid: PredictionTaskUUID
48
+ prediction_type: PredictionType
49
+ signed_urls: List[_MediaUploadUrl]
50
+
51
+
52
+ def _is_task_successful(status: PredictionTaskState) -> bool:
53
+ return status == PREDICTED_STATUS
54
+
55
+
56
+ def _is_task_failed(status: PredictionTaskState) -> bool:
57
+ return status.startswith(FAILED_STATUS_PREFIX)
58
+
59
+
60
+ def _is_task_complete(status: PredictionTaskState) -> bool:
61
+ """
62
+ Returns True if the prediction task is complete, either successfully or unsuccessfully.
63
+
64
+ Avoid enum to allow additional states to be backwards compatible.
65
+ """
66
+ return _is_task_successful(status) or _is_task_failed(status)
67
+
68
+
69
+ class Classification:
70
+ def __init__(self, client: "Dragoneye"):
71
+ self._client = client
72
+
73
+ # Create a reusable backoff decorator for 429 rate limit errors
74
+ def _should_retry_429(exception: Exception) -> bool:
75
+ """Check if exception is a 429 rate limit error"""
76
+ return (
77
+ isinstance(exception, aiohttp.ClientResponseError)
78
+ and exception.status == 429
79
+ )
80
+
81
+ # Store the backoff decorator as an instance attribute
82
+ self._backoff_on_429 = backoff.on_exception(
83
+ wait_gen=backoff.expo,
84
+ exception=aiohttp.ClientResponseError,
85
+ max_tries=client.max_retries,
86
+ max_time=client.max_backoff_time,
87
+ on_backoff=lambda e: logging.info(
88
+ f"Rate limit exceeded - backing off: {e}"
89
+ ),
90
+ on_giveup=lambda e: logging.info(f"Rate limit exceeded - giving up: {e}"),
91
+ giveup=lambda e: not _should_retry_429(e),
92
+ jitter=client.backoff_jitter,
93
+ )
94
+
95
+ async def predict_image(
96
+ self,
97
+ media: Image,
98
+ model_name: str,
99
+ timeout_seconds: Optional[int] = None,
100
+ ) -> ClassificationPredictImageResponse:
101
+ return await self._predict_unified(
102
+ media=media,
103
+ model_name=model_name,
104
+ frames_per_second=None,
105
+ timeout_seconds=timeout_seconds,
106
+ )
107
+
108
+ async def predict_video(
109
+ self,
110
+ media: Video,
111
+ model_name: str,
112
+ frames_per_second: int = 1,
113
+ timeout_seconds: Optional[int] = None,
114
+ ) -> ClassificationPredictVideoResponse:
115
+ return await self._predict_unified(
116
+ media=media,
117
+ model_name=model_name,
118
+ frames_per_second=frames_per_second,
119
+ timeout_seconds=timeout_seconds,
120
+ )
121
+
122
+ async def status(
123
+ self, prediction_task_uuid: PredictionTaskUUID
124
+ ) -> PredictionTaskStatusResponse:
125
+ """
126
+ Given a prediction task UUID, return the current status of that prediction task.
127
+ """
128
+ url = f"{BASE_API_URL}/prediction-task/status?predictionTaskUuid={prediction_task_uuid}"
129
+ headers = {"Authorization": f"Bearer {self._client.api_key}"}
130
+
131
+ @self._backoff_on_429
132
+ async def _make_request():
133
+ async with aiohttp.ClientSession() as session:
134
+ async with session.get(url, headers=headers) as resp:
135
+ resp.raise_for_status()
136
+ payload = await resp.json()
137
+ return payload
138
+
139
+ payload = await _make_request()
140
+ return PredictionTaskStatusResponse.model_validate(payload)
141
+
142
+ async def get_image_results(
143
+ self,
144
+ prediction_task_uuid: PredictionTaskUUID,
145
+ ) -> ClassificationPredictImageResponse:
146
+ return await self._get_results_unified(
147
+ prediction_task_uuid=prediction_task_uuid,
148
+ prediction_type="image",
149
+ )
150
+
151
+ async def get_video_results(
152
+ self,
153
+ prediction_task_uuid: PredictionTaskUUID,
154
+ ) -> ClassificationPredictVideoResponse:
155
+ return await self._get_results_unified(
156
+ prediction_task_uuid=prediction_task_uuid,
157
+ prediction_type="video",
158
+ )
159
+
160
+ @overload
161
+ async def _get_results_unified(
162
+ self,
163
+ prediction_task_uuid: PredictionTaskUUID,
164
+ prediction_type: Literal["image"],
165
+ ) -> ClassificationPredictImageResponse: ...
166
+
167
+ @overload
168
+ async def _get_results_unified(
169
+ self,
170
+ prediction_task_uuid: PredictionTaskUUID,
171
+ prediction_type: Literal["video"],
172
+ ) -> ClassificationPredictVideoResponse: ...
173
+
174
+ @overload
175
+ async def _get_results_unified(
176
+ self,
177
+ prediction_task_uuid: PredictionTaskUUID,
178
+ prediction_type: PredictionType,
179
+ ) -> Union[
180
+ ClassificationPredictImageResponse, ClassificationPredictVideoResponse
181
+ ]: ...
182
+
183
+ async def _get_results_unified(
184
+ self, prediction_task_uuid: PredictionTaskUUID, prediction_type: PredictionType
185
+ ) -> Union[ClassificationPredictImageResponse, ClassificationPredictVideoResponse]:
186
+ url = f"{BASE_API_URL}/prediction-task/results?predictionTaskUuid={prediction_task_uuid}"
187
+ headers = {"Authorization": f"Bearer {self._client.api_key}"}
188
+
189
+ @self._backoff_on_429
190
+ async def _make_request():
191
+ async with aiohttp.ClientSession() as session:
192
+ async with session.get(url, headers=headers) as resp:
193
+ resp.raise_for_status()
194
+ payload = await resp.json()
195
+ return payload
196
+
197
+ try:
198
+ payload = await _make_request()
199
+ except ClientError as error:
200
+ raise PredictionTaskResultsUnavailableError(
201
+ f"Error getting prediction task results: {error}"
202
+ )
203
+
204
+ # Add the prediction task uuid to the response before returning
205
+ payload["prediction_task_uuid"] = prediction_task_uuid
206
+
207
+ if prediction_type == "image":
208
+ return ClassificationPredictImageResponse.model_validate(payload)
209
+ elif prediction_type == "video":
210
+ return ClassificationPredictVideoResponse.model_validate(payload)
211
+ else:
212
+ raise ValueError(f"Unsupported prediction type: {prediction_type}")
213
+
214
+ ##### Internal API methods #####
215
+ @overload
216
+ async def _predict_unified(
217
+ self,
218
+ media: Image,
219
+ model_name: str,
220
+ frames_per_second: Optional[int],
221
+ timeout_seconds: Optional[int] = None,
222
+ ) -> ClassificationPredictImageResponse: ...
223
+
224
+ @overload
225
+ async def _predict_unified(
226
+ self,
227
+ media: Video,
228
+ model_name: str,
229
+ frames_per_second: Optional[int],
230
+ timeout_seconds: Optional[int] = None,
231
+ ) -> ClassificationPredictVideoResponse: ...
232
+
233
+ async def _predict_unified(
234
+ self,
235
+ media: Union[Image, Video],
236
+ model_name: str,
237
+ frames_per_second: Optional[int],
238
+ timeout_seconds: Optional[int] = None,
239
+ ) -> Union[ClassificationPredictImageResponse, ClassificationPredictVideoResponse]:
240
+ prediction_task_begin_response = await self._begin_prediction_task(
241
+ mime_type=media.mime_type,
242
+ frames_per_second=frames_per_second,
243
+ )
244
+
245
+ await self._upload_media_to_prediction_task(
246
+ media, prediction_task_begin_response.signed_urls[0]
247
+ )
248
+
249
+ predict_url = f"{BASE_API_URL}/predict"
250
+ predict_data = {
251
+ "model_name": model_name,
252
+ "prediction_task_uuid": prediction_task_begin_response.prediction_task_uuid,
253
+ }
254
+ predict_headers = {
255
+ "Authorization": f"Bearer {self._client.api_key}",
256
+ }
257
+
258
+ @self._backoff_on_429
259
+ async def _make_request():
260
+ async with aiohttp.ClientSession() as session:
261
+ async with session.post(
262
+ predict_url, data=predict_data, headers=predict_headers
263
+ ) as resp:
264
+ resp.raise_for_status()
265
+
266
+ try:
267
+ await _make_request()
268
+ except ClientError as error:
269
+ raise PredictionTaskError("Error initiating prediction:", error)
270
+
271
+ status = await self._wait_for_prediction_task_completion(
272
+ prediction_task_uuid=prediction_task_begin_response.prediction_task_uuid,
273
+ timeout_seconds=timeout_seconds,
274
+ )
275
+
276
+ if _is_task_failed(status.status):
277
+ raise PredictionTaskError(f"Prediction task failed: {status.status}")
278
+
279
+ return await self._get_results_unified(
280
+ prediction_task_uuid=status.prediction_task_uuid,
281
+ prediction_type=prediction_task_begin_response.prediction_type,
282
+ )
283
+
284
+ async def _wait_for_prediction_task_completion(
285
+ self,
286
+ prediction_task_uuid: PredictionTaskUUID,
287
+ polling_interval: float = 1.0,
288
+ timeout_seconds: Optional[int] = None,
289
+ ) -> PredictionTaskStatusResponse:
290
+ start_time = time.monotonic()
291
+ while True:
292
+ # Check if we've exceeded the timeout
293
+ if timeout_seconds is not None:
294
+ elapsed = time.monotonic() - start_time
295
+ if elapsed >= timeout_seconds:
296
+ raise PredictionTimeoutException(
297
+ f"Prediction task {prediction_task_uuid} did not complete within {timeout_seconds} seconds."
298
+ )
299
+
300
+ status = await self.status(prediction_task_uuid)
301
+ if _is_task_complete(status.status):
302
+ return status
303
+
304
+ await asyncio.sleep(polling_interval)
305
+
306
+ async def _upload_media_to_prediction_task(
307
+ self, media: Media, signed_url: _MediaUploadUrl
308
+ ) -> None:
309
+ # Build multipart form: include all presigned fields + the file
310
+ form = aiohttp.FormData()
311
+ for k, v in signed_url.presigned_post_request.fields.items():
312
+ form.add_field(k, str(v))
313
+
314
+ file_obj = media.bytes_io()
315
+ try:
316
+ file_obj.seek(0)
317
+ except Exception:
318
+ pass # if it's already at start or non-seekable
319
+
320
+ form.add_field(
321
+ "file",
322
+ file_obj,
323
+ filename="file",
324
+ )
325
+
326
+ try:
327
+ async with aiohttp.ClientSession() as session:
328
+ async with session.post(
329
+ signed_url.presigned_post_request.url,
330
+ data=form,
331
+ ) as resp:
332
+ resp.raise_for_status()
333
+ except ClientError as error:
334
+ raise PredictionUploadError(
335
+ "Error uploading media to prediction task:", error
336
+ )
337
+
338
+ async def _begin_prediction_task(
339
+ self,
340
+ mime_type: str,
341
+ frames_per_second: Optional[int],
342
+ ) -> _PredictionTaskBeginResponse:
343
+ url = f"{BASE_API_URL}/prediction-task/begin"
344
+
345
+ form_data = aiohttp.FormData()
346
+ form_data.add_field("mimetype", mime_type)
347
+ if frames_per_second is not None:
348
+ form_data.add_field("frames_per_second", str(frames_per_second))
349
+
350
+ headers = {
351
+ "Authorization": f"Bearer {self._client.api_key}",
352
+ }
353
+
354
+ @self._backoff_on_429
355
+ async def _make_request():
356
+ async with aiohttp.ClientSession() as session:
357
+ async with session.post(url, data=form_data, headers=headers) as resp:
358
+ resp.raise_for_status()
359
+ payload = await resp.json()
360
+ return payload
361
+
362
+ try:
363
+ payload = await _make_request()
364
+ except ClientError as error:
365
+ raise PredictionTaskBeginError("Error beginning prediction task:", error)
366
+
367
+ return _PredictionTaskBeginResponse.model_validate(payload)
@@ -0,0 +1,29 @@
1
+ import os
2
+ from typing import Callable, Optional
3
+
4
+ import backoff
5
+
6
+ from dragoneye.classification import Classification
7
+
8
+
9
+ class Dragoneye:
10
+ def __init__(
11
+ self,
12
+ api_key: Optional[str] = None,
13
+ max_retries: int = 10,
14
+ max_backoff_time: int = 120,
15
+ backoff_jitter: Callable[[float], float] = backoff.full_jitter,
16
+ ):
17
+ if api_key is None:
18
+ api_key = os.getenv("DRAGONEYE_API_KEY")
19
+
20
+ assert api_key is not None, (
21
+ "API key is required - set the DRAGONEYE_API_KEY environment variable or pass it to the [Dragoneye] constructor"
22
+ )
23
+
24
+ self.api_key = api_key
25
+ self.max_retries = max_retries
26
+ self.max_backoff_time = max_backoff_time
27
+ self.backoff_jitter = backoff_jitter
28
+
29
+ self.classification = Classification(self)
@@ -0,0 +1,2 @@
1
+ PREDICTED_STATUS = "predicted"
2
+ FAILED_STATUS_PREFIX = "failed"
@@ -0,0 +1,49 @@
1
+ from typing import Dict, Sequence
2
+
3
+ from pydantic import BaseModel
4
+
5
+ from dragoneye.types.common import (
6
+ NormalizedBbox,
7
+ PredictionTaskState,
8
+ PredictionTaskUUID,
9
+ PredictionType,
10
+ TaxonID,
11
+ TaxonPrediction,
12
+ )
13
+
14
+
15
+ class PredictionTaskStatusResponse(BaseModel):
16
+ prediction_task_uuid: PredictionTaskUUID
17
+ prediction_type: PredictionType
18
+ status: PredictionTaskState
19
+
20
+
21
+ class ClassificationTraitRootPrediction(BaseModel):
22
+ id: TaxonID
23
+ name: str
24
+ displayName: str
25
+ taxons: Sequence[TaxonPrediction]
26
+
27
+
28
+ class ClassificationObjectPrediction(BaseModel):
29
+ normalizedBbox: NormalizedBbox
30
+ category: TaxonPrediction
31
+ traits: Sequence[ClassificationTraitRootPrediction]
32
+
33
+
34
+ class ClassificationPredictImageResponse(BaseModel):
35
+ predictions: Sequence[ClassificationObjectPrediction]
36
+ prediction_task_uuid: PredictionTaskUUID
37
+
38
+
39
+ class ClassificationVideoObjectPrediction(ClassificationObjectPrediction):
40
+ frame_id: str
41
+ timestamp_microseconds: int
42
+
43
+
44
+ class ClassificationPredictVideoResponse(BaseModel):
45
+ timestamp_us_to_predictions: Dict[
46
+ int, Sequence[ClassificationVideoObjectPrediction]
47
+ ]
48
+ frames_per_second: int
49
+ prediction_task_uuid: PredictionTaskUUID
@@ -1,8 +1,11 @@
1
1
  from enum import Enum
2
- from typing import NewType, Optional, Sequence, Tuple
2
+ from typing import Literal, NewType, Optional, Sequence, Tuple
3
3
 
4
4
  from pydantic import BaseModel
5
5
 
6
+ PredictionType = Literal["image", "video"]
7
+ PredictionTaskState = NewType("PredictionTaskState", str)
8
+
6
9
  NormalizedBbox = NewType("NormalizedBbox", Tuple[float, float, float, float])
7
10
 
8
11
 
@@ -13,6 +16,8 @@ class TaxonType(str, Enum):
13
16
 
14
17
  TaxonID = NewType("TaxonID", int)
15
18
 
19
+ PredictionTaskUUID = NewType("PredictionTaskUUID", str)
20
+
16
21
 
17
22
  class TaxonPrediction(BaseModel):
18
23
  id: TaxonID
@@ -0,0 +1,18 @@
1
+ class PredictionTaskError(Exception):
2
+ pass
3
+
4
+
5
+ class PredictionUploadError(Exception):
6
+ pass
7
+
8
+
9
+ class PredictionTaskBeginError(Exception):
10
+ pass
11
+
12
+
13
+ class PredictionTaskResultsUnavailableError(Exception):
14
+ pass
15
+
16
+
17
+ class PredictionTimeoutException(Exception):
18
+ pass
@@ -0,0 +1,186 @@
1
+ from __future__ import annotations
2
+
3
+ import mimetypes
4
+ import os
5
+ from dataclasses import dataclass
6
+ from io import BufferedReader, BytesIO
7
+ from pathlib import Path
8
+ from typing import BinaryIO, ClassVar, Optional, Tuple, Union
9
+
10
+ from typing_extensions import Self
11
+
12
+
13
+ @dataclass(frozen=True)
14
+ class Media:
15
+ """Generic binary media + mime_type with conservative, non-destructive access."""
16
+
17
+ file_or_bytes: Union[bytes, BytesIO, BinaryIO, BufferedReader]
18
+ mime_type: str
19
+
20
+ # Subclasses set this to enforce a family of mimetypes, e.g. ("image/",)
21
+ ACCEPT_PREFIXES: ClassVar[Tuple[str, ...]] = ()
22
+
23
+ def __post_init__(self) -> None:
24
+ # Enforce subtype-specific mimetype families when specified.
25
+ if self.ACCEPT_PREFIXES and not any(
26
+ self.mime_type.startswith(p) for p in self.ACCEPT_PREFIXES
27
+ ):
28
+ raise ValueError(
29
+ f"{self.__class__.__name__} requires mime_type starting with "
30
+ f"{' or '.join(self.ACCEPT_PREFIXES)}; got {self.mime_type!r}"
31
+ )
32
+
33
+ # ---------- Convenience constructors ----------
34
+
35
+ @classmethod
36
+ def from_bytes(cls, data: bytes, mime_type: str) -> Self:
37
+ return cls(file_or_bytes=data, mime_type=mime_type)
38
+
39
+ @classmethod
40
+ def from_stream(cls, stream: BinaryIO, *, mime_type: str) -> Self:
41
+ """
42
+ Accepts any readable binary stream (e.g., open('file', 'rb')).
43
+ Keeps the stream as-is; reading is deferred to bytes_io().
44
+ """
45
+ return cls(file_or_bytes=stream, mime_type=mime_type)
46
+
47
+ @classmethod
48
+ def from_path(
49
+ cls,
50
+ path: Union[str, os.PathLike[str]],
51
+ *,
52
+ mime_type: Optional[str] = None,
53
+ guess_from_extension: bool = True,
54
+ read_into_memory: bool = False,
55
+ ) -> Self:
56
+ """
57
+ Create a Media (or subclass) from a filesystem path.
58
+
59
+ - `path`: Path to the file on disk.
60
+ - `mime_type`: Explicit mime type. If omitted and `guess_from_extension=True`,
61
+ we'll try to guess from the file extension.
62
+ - `read_into_memory=True`: load file bytes into memory (closes file immediately).
63
+ Otherwise, keep an open file stream.
64
+ """
65
+ path = Path(path)
66
+ if not path.exists():
67
+ raise FileNotFoundError(path)
68
+
69
+ mt = mime_type or (
70
+ mimetypes.guess_type(path.name)[0] if guess_from_extension else None
71
+ )
72
+ if mt is None:
73
+ raise ValueError(
74
+ f"mime_type is required for {path} (no extension-based guess available)."
75
+ )
76
+
77
+ if read_into_memory:
78
+ data = path.read_bytes()
79
+ return cls(file_or_bytes=data, mime_type=mt)
80
+ else:
81
+ f = path.open("rb")
82
+ return cls(file_or_bytes=f, mime_type=mt)
83
+
84
+ # ---------- Utilities ----------
85
+
86
+ def bytes_io(self) -> BytesIO:
87
+ """
88
+ Returns a fresh BytesIO with the full content.
89
+ - If we wrap a BytesIO, we non-destructively rewind and copy.
90
+ - If we wrap a stream, we read it (without assuming seekability).
91
+ - If we hold bytes, we just wrap them.
92
+ """
93
+ src = self.file_or_bytes
94
+
95
+ if isinstance(src, bytes):
96
+ return BytesIO(src)
97
+
98
+ if isinstance(src, BytesIO):
99
+ # Non-destructively copy contents
100
+ pos = src.tell()
101
+ try:
102
+ src.seek(0)
103
+ except Exception:
104
+ pass
105
+ data = src.read()
106
+ try:
107
+ src.seek(pos)
108
+ except Exception:
109
+ pass
110
+ return BytesIO(data)
111
+
112
+ # For any readable object with .read()
113
+ if hasattr(src, "read"):
114
+ pos = _tell_safe(src)
115
+ data = src.read()
116
+ _seek_safe(src, pos)
117
+ return BytesIO(data)
118
+
119
+ raise TypeError(
120
+ "Invalid media source: expected bytes, BytesIO, or a readable binary stream."
121
+ )
122
+
123
+ def size_bytes(self) -> Optional[int]:
124
+ """
125
+ Best-effort size inference without consuming the stream.
126
+ Returns None if size can't be determined cheaply.
127
+ """
128
+ src = self.file_or_bytes
129
+ if isinstance(src, bytes):
130
+ return len(src)
131
+ if isinstance(src, BytesIO):
132
+ pos = src.tell()
133
+ try:
134
+ src.seek(0, os.SEEK_END)
135
+ end = src.tell()
136
+ finally:
137
+ _seek_safe(src, pos)
138
+ return end
139
+ if hasattr(src, "fileno"):
140
+ try:
141
+ return os.fstat(src.fileno()).st_size # type: ignore[arg-type]
142
+ except Exception:
143
+ return None
144
+ # Path-based size if it looks like a buffered reader with .name
145
+ if hasattr(src, "name"):
146
+ try:
147
+ return Path(src.name).stat().st_size # type: ignore[arg-type]
148
+ except Exception:
149
+ return None
150
+ return None
151
+
152
+
153
+ @dataclass(frozen=True)
154
+ class Image(Media):
155
+ """Media restricted to image/* mimetypes."""
156
+
157
+ ACCEPT_PREFIXES: ClassVar[Tuple[str, ...]] = ("image/",)
158
+
159
+
160
+ @dataclass(frozen=True)
161
+ class Video(Media):
162
+ """Media restricted to video/* mimetypes."""
163
+
164
+ ACCEPT_PREFIXES: ClassVar[Tuple[str, ...]] = ("video/",)
165
+
166
+
167
+ # ---------- Helpers ----------
168
+
169
+
170
+ def _tell_safe(stream: BinaryIO) -> Optional[int]:
171
+ try:
172
+ if hasattr(stream, "tell"):
173
+ return stream.tell()
174
+ except Exception:
175
+ pass
176
+ return None
177
+
178
+
179
+ def _seek_safe(stream: BinaryIO, pos: Optional[int]) -> None:
180
+ if pos is None:
181
+ return
182
+ try:
183
+ if hasattr(stream, "seek"):
184
+ stream.seek(pos)
185
+ except Exception:
186
+ pass
@@ -1,21 +0,0 @@
1
- MIT License
2
-
3
- Copyright (c) 2024 dragoneyeAI
4
-
5
- Permission is hereby granted, free of charge, to any person obtaining a copy
6
- of this software and associated documentation files (the "Software"), to deal
7
- in the Software without restriction, including without limitation the rights
8
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
- copies of the Software, and to permit persons to whom the Software is
10
- furnished to do so, subject to the following conditions:
11
-
12
- The above copyright notice and this permission notice shall be included in all
13
- copies or substantial portions of the Software.
14
-
15
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
- SOFTWARE.
@@ -1,7 +0,0 @@
1
- Metadata-Version: 2.1
2
- Name: dragoneye-python
3
- Version: 0.2.0
4
- Requires-Python: >=3.8
5
- License-File: LICENSE
6
- Requires-Dist: requests
7
- Requires-Dist: pydantic>=2
@@ -1,7 +0,0 @@
1
- Metadata-Version: 2.1
2
- Name: dragoneye-python
3
- Version: 0.2.0
4
- Requires-Python: >=3.8
5
- License-File: LICENSE
6
- Requires-Dist: requests
7
- Requires-Dist: pydantic>=2
@@ -1,2 +0,0 @@
1
- requests
2
- pydantic>=2
@@ -1,2 +0,0 @@
1
- requests
2
- pydantic>=2
@@ -1,117 +0,0 @@
1
- import io
2
- from typing import TYPE_CHECKING, BinaryIO, Sequence
3
-
4
- import requests
5
- from pydantic import BaseModel
6
-
7
- from .types.common import BASE_API_URL, NormalizedBbox, TaxonID, TaxonPrediction
8
- from .types.image import Image, assert_consistent_data_type
9
-
10
- if TYPE_CHECKING:
11
- from .client import Dragoneye
12
-
13
-
14
- class ClassificationTraitRootPrediction(BaseModel):
15
- id: TaxonID
16
- name: str
17
- displayName: str
18
- taxons: Sequence[TaxonPrediction]
19
-
20
-
21
- class ClassificationObjectPrediction(BaseModel):
22
- normalizedBbox: NormalizedBbox
23
- category: TaxonPrediction
24
- traits: Sequence[ClassificationTraitRootPrediction]
25
-
26
-
27
- class ClassificationPredictImageResponse(BaseModel):
28
- predictions: Sequence[ClassificationObjectPrediction]
29
-
30
-
31
- class ClassificationProductPrediction(BaseModel):
32
- category: TaxonPrediction
33
- traits: Sequence[ClassificationTraitRootPrediction]
34
-
35
-
36
- class ClassificationPredictProductResponse(BaseModel):
37
- predictions: Sequence[ClassificationProductPrediction]
38
-
39
-
40
- class Classification:
41
- def __init__(self, client: "Dragoneye"):
42
- self._client = client
43
-
44
- def predict(
45
- self, image: Image, model_name: str
46
- ) -> ClassificationPredictImageResponse:
47
- url = f"{BASE_API_URL}/predict"
48
-
49
- files = {}
50
- data = {}
51
-
52
- if image.file_or_bytes is not None:
53
- if isinstance(image.file_or_bytes, bytes):
54
- files["image_file"] = io.BytesIO(image.file_or_bytes)
55
- elif isinstance(image.file_or_bytes, BinaryIO): # pyright: ignore [reportUnnecessaryIsInstance]
56
- files["image_file"] = image.file_or_bytes
57
- else:
58
- raise ValueError("Invalid image type: Must be bytes or BinaryIO")
59
- elif image.url is not None:
60
- data["image_url"] = image.url
61
- else:
62
- raise ValueError(
63
- "Missing image: Either image file or image url must be specified"
64
- )
65
-
66
- data["model_name"] = model_name
67
-
68
- headers = {"Authorization": f"Bearer {self._client.api_key}"}
69
-
70
- try:
71
- response = requests.post(url, files=files, data=data, headers=headers)
72
- response.raise_for_status()
73
- except requests.RequestException as error:
74
- raise Exception(
75
- "Error during Dragoneye Classification prediction request:", error
76
- )
77
-
78
- return ClassificationPredictImageResponse.model_validate(response.json())
79
-
80
- def predict_product(
81
- self, images: Sequence[Image], model_name: str
82
- ) -> ClassificationPredictProductResponse:
83
- url = f"{BASE_API_URL}/predict-product"
84
-
85
- files = []
86
- data = {}
87
-
88
- assert_consistent_data_type(images)
89
-
90
- for image in images:
91
- if image.file_or_bytes is not None:
92
- if isinstance(image.file_or_bytes, bytes):
93
- files.append(("image_file", io.BytesIO(image.file_or_bytes)))
94
- elif (
95
- isinstance(image.file_or_bytes, BinaryIO) # pyright: ignore [reportUnnecessaryIsInstance]
96
- or issubclass(type(image.file_or_bytes), BinaryIO)
97
- or isinstance(image.file_or_bytes, io.BufferedReader)
98
- ):
99
- files.append(("image_file", image.file_or_bytes))
100
- else:
101
- raise ValueError("Invalid image type: Must be bytes or BinaryIO")
102
- elif image.url is not None:
103
- data.setdefault("image_urls", []).append(image.url)
104
-
105
- data["model_name"] = model_name
106
-
107
- headers = {"Authorization": f"Bearer {self._client.api_key}"}
108
-
109
- try:
110
- response = requests.post(url, files=files, data=data, headers=headers)
111
- response.raise_for_status()
112
- except requests.RequestException as error:
113
- raise Exception(
114
- "Error during Dragoneye Classification prediction request:", error
115
- )
116
-
117
- return ClassificationPredictProductResponse.model_validate(response.json())
@@ -1,18 +0,0 @@
1
- import os
2
- from typing import Optional
3
-
4
- from dragoneye.classification import Classification
5
-
6
-
7
- class Dragoneye:
8
- def __init__(self, api_key: Optional[str] = None):
9
- if api_key is None:
10
- api_key = os.getenv("DRAGONEYE_API_KEY")
11
-
12
- assert (
13
- api_key is not None
14
- ), "API key is required - set the DRAGONEYE_API_KEY environment variable or pass it to the [Dragoneye] constructor"
15
-
16
- self.api_key = api_key
17
-
18
- self.classification = Classification(self)
@@ -1,12 +0,0 @@
1
- from typing import BinaryIO, NamedTuple, Optional, Sequence, Union
2
-
3
-
4
- class Image(NamedTuple):
5
- file_or_bytes: Optional[Union[bytes, BinaryIO]] = None
6
- url: Optional[str] = None
7
-
8
-
9
- def assert_consistent_data_type(images: Sequence[Image]) -> None:
10
- assert all(image.file_or_bytes is not None for image in images) ^ all(
11
- image.url is not None for image in images
12
- )