tinesight 0.0.2.dev1__tar.gz → 0.0.5.dev1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,10 +1,12 @@
  Metadata-Version: 2.3
  Name: tinesight
- Version: 0.0.2.dev1
+ Version: 0.0.5.dev1
  Summary: Tinesight SDK
  Requires-Dist: cryptography>=46.0.3
  Requires-Dist: pycognito>=2024.5.1
+ Requires-Dist: opencv-python>=4.13.0.90 ; extra == 'video'
  Requires-Python: >=3.12
+ Provides-Extra: video
  Description-Content-Type: text/markdown

  # Tinesight SDK
@@ -1,6 +1,6 @@
  [project]
  name = "tinesight"
- version = "0.0.2.dev1"
+ version = "0.0.5.dev1"
  description = "Tinesight SDK"
  readme = "README.md"
  requires-python = ">=3.12"
@@ -9,6 +9,11 @@ dependencies = [
  "pycognito>=2024.5.1",
  ]

+ [project.optional-dependencies]
+ video = [
+ "opencv-python>=4.13.0.90",
+ ]
+
  [build-system]
  requires = ["uv_build"]
  build-backend = "uv_build"
@@ -0,0 +1 @@
+ __all__ = ["TinesightClient", "TinesightRegistrar"]
@@ -10,5 +10,5 @@ class TinesightApiMixin:

  @property
  def public_ux_api_uri(self) -> str:
- api_ref = "p1pco7l9b1" if os.getenv("TINESIGHT_DEV") else "api"
+ api_ref = "17bx575oxl" if os.getenv("TINESIGHT_DEV") else "api"
  return f"https://{api_ref}.execute-api.us-east-1.amazonaws.com"
@@ -0,0 +1,225 @@
+ from collections.abc import Callable
+ from functools import partial
+ from pathlib import Path
+ import time
+
+ import requests
+
+ from tinesight._api import TinesightApiMixin
+
+
+ class TinesightClient(TinesightApiMixin):
+ """
+ Client for invoking the Tinesight API from a device.
+
+ To use this class, a device must be registered with a signed certificate using the
+ `TinesightRegistrar`.
+
+ Examples:
+ >>> result = TinesightClient(my_key_path, my_cert_path).classify(my_image_bytes)
+ >>> print(result.json())
+ {'class': 'deer', 'probability': 0.98}
+
+ """
+
+ @property
+ def _mtls_post(self) -> Callable:
+ """Private wrapper for making POST requests with the device's mTLS certificate."""
+ return partial(requests.post, cert=(self.cert_path, self.key_path))
+
+ def __init__(self, x509_key_path: Path | str, x509_cert_path: Path | str):
+ self.key_path: str = str(x509_key_path)
+ self.cert_path: str = str(x509_cert_path)
+
+ def classify(self, image: str | Path | bytes) -> requests.Response:
+ """Invokes the classification model for the specified image
+
+ Args:
+ image: Can be a file path (str or Path) or raw image bytes
+
+ Returns:
+ requests.Response containing classification results
+ """
+ classification_url = self.tenant_base_api_uri + "/classify/v1"
+
+ if isinstance(image, bytes):
+ image_bytes = image
+ file_name = "frame.jpg"
+ else:
+ if isinstance(image, str):
+ image = Path(image)
+ with open(image, "rb") as fp:
+ image_bytes = fp.read()
+ file_name = image.name
+
+ return self._mtls_post(classification_url, files={"file": (file_name, image_bytes)})
+
+ def classify_video_stream(
+ self,
+ video_source: str | int = 0,
+ frame_skip: int = 10,
+ probability_threshold: float = 0.55,
+ window_name: str = "Tinesight Classification",
+ ) -> None:
+ """
+ Continuously classify frames from a video stream and display results.
+
+ Captures video from a source (camera, RTSP stream, etc.), classifies frames
+ at regular intervals, and displays the video with classification annotations.
+ Press Ctrl+C to stop.
+
+ Args:
+ video_source: Video source - can be device index (0 for default camera),
+ RTSP URL (e.g., "rtsp://192.168.1.100:8554/stream"),
+ or HTTP stream URL
+ frame_skip: Classify every Nth frame (default: 10). Higher values = faster but less frequent updates
+ probability_threshold: Minimum confidence threshold to display classification (default: 0.55)
+ window_name: Name of the display window
+
+ Examples:
+ >>> client = TinesightClient(key_path, cert_path)
+ >>> # Local camera
+ >>> client.classify_video_stream(0)
+ >>> # Raspberry Pi RTSP stream
+ >>> client.classify_video_stream("rtsp://raspberrypi.local:8554/stream")
+ >>> # With custom threshold
+ >>> client.classify_video_stream(0, probability_threshold=0.75)
+ """
+ try:
+ import cv2
+ except ImportError:
+ raise ImportError(
+ "OpenCV is required for video streaming. Install with: pip install opencv-python"
+ )
+
+ # Open video stream
+ cap = cv2.VideoCapture(video_source)
+ if not cap.isOpened():
+ raise RuntimeError(f"Could not open video source: {video_source}")
+
+ print(f"Starting video stream from: {video_source}")
+ print(f"Classifying every {frame_skip} frames")
+ print("Press Ctrl+C to stop")
+
+ frame_count = 0
+ last_classification = None
+ last_probability = None
+ fps_start = time.time()
+ fps_frame_count = 0
+ current_fps = 0
+ last_latency = 0
+
+ try:
+ while True:
+ ret, frame = cap.read()
+ if not ret:
+ print("Failed to read frame, attempting to reconnect...")
+ cap.release()
+ time.sleep(1)
+ cap = cv2.VideoCapture(video_source)
+ continue
+
+ # Calculate FPS
+ fps_frame_count += 1
+ if time.time() - fps_start >= 1.0:
+ current_fps = int(fps_frame_count / (time.time() - fps_start))
+ fps_frame_count = 0
+ fps_start = time.time()
+
+ # Classify every Nth frame
+ if frame_count % frame_skip == 0:
+ # Encode frame as JPEG
+ _, buffer = cv2.imencode(".jpg", frame)
+ image_bytes = buffer.tobytes()
+
+ # Classify with timing
+ classify_start = time.time()
+ try:
+ response = self.classify(image_bytes)
+ last_latency = int((time.time() - classify_start) * 1000) # ms
+
+ if response.status_code == 200:
+ result = response.json()
+ last_classification = result.get("class", "Unknown")
+ last_probability = result.get("probability", 0.0)
+ else:
+ last_classification = f"Error: {response.status_code}"
+ last_probability = None
+ except Exception as e:
+ last_classification = f"Error: {str(e)}"
+ last_probability = None
+ last_latency = int((time.time() - classify_start) * 1000)
+
+ # Draw annotations on frame
+ height, width = frame.shape[:2]
+
+ # Semi-transparent overlay for text background
+ overlay = frame.copy()
+ cv2.rectangle(overlay, (10, 10), (width - 10, 120), (0, 0, 0), -1)
+ frame = cv2.addWeighted(overlay, 0.6, frame, 0.4, 0)
+
+ # Display classification results only if above threshold
+ y_offset = 40
+ if (
+ last_classification
+ and last_probability is not None
+ and last_probability >= probability_threshold
+ ):
+ cv2.putText(
+ frame,
+ f"Class: {last_classification}",
+ (20, y_offset),
+ cv2.FONT_HERSHEY_SIMPLEX,
+ 0.7,
+ (0, 255, 0),
+ 2,
+ )
+ y_offset += 30
+ cv2.putText(
+ frame,
+ f"Confidence: {last_probability:.2%}",
+ (20, y_offset),
+ cv2.FONT_HERSHEY_SIMPLEX,
+ 0.7,
+ (0, 255, 0),
+ 2,
+ )
+ elif last_classification and last_probability is not None:
+ # Show low confidence indicator
+ cv2.putText(
+ frame,
+ f"Low confidence ({last_probability:.2%})",
+ (20, y_offset),
+ cv2.FONT_HERSHEY_SIMPLEX,
+ 0.6,
+ (100, 100, 100),
+ 2,
+ )
+ y_offset += 30
+
+ # Display metrics
+ y_offset += 30
+ cv2.putText(
+ frame,
+ f"FPS: {current_fps:.1f} | Latency: {last_latency:.0f}ms",
+ (20, y_offset),
+ cv2.FONT_HERSHEY_SIMPLEX,
+ 0.6,
+ (255, 255, 255),
+ 2,
+ )
+
+ # Show frame
+ cv2.imshow(window_name, frame)
+
+ # Check for ESC key or window close
+ if cv2.waitKey(1) & 0xFF == 27:
+ break
+
+ frame_count += 1
+
+ except KeyboardInterrupt:
+ print("\nStopping video stream...")
+ finally:
+ cap.release()
+ cv2.destroyAllWindows()
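Putting the new client module together, a minimal usage sketch follows. It assumes TinesightClient is importable from the package root (as the new __all__ suggests), that the video extra is installed for the streaming call, and that the key/certificate paths are placeholders for files produced during device registration.

    from pathlib import Path

    from tinesight import TinesightClient  # assumed re-export per the new __all__

    # Placeholder paths: the device's private key and the certificate issued by
    # TinesightRegistrar.register_device.
    key_path = Path("device.key")
    cert_path = Path("device.pem")

    client = TinesightClient(key_path, cert_path)

    # One-off classification; classify() now accepts a str path, a Path, or raw bytes.
    response = client.classify("frame.jpg")
    print(response.json())

    # Continuous classification from the default camera; requires the 'video' extra (OpenCV).
    client.classify_video_stream(0, frame_skip=10, probability_threshold=0.55)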
@@ -1,4 +1,3 @@
- import io
  import os
  from http import HTTPStatus
  from pathlib import Path
@@ -10,12 +9,12 @@ from cryptography.hazmat.primitives import serialization
  from cryptography.x509.oid import NameOID
  from pycognito.utils import RequestsSrpAuth, TokenType

- from tinesight.api import TinesightApiMixin
+ from tinesight._api import TinesightApiMixin

  # Cognito configuration - these should be set before using TinesightRegistrar
  # TODO (NP) figure out how to switch between dev and prod
- COGNITO_USER_POOL_ID = os.environ.get("COGNITO_USER_POOL_ID", "us-east-1_g1yNKyU6h")
- COGNITO_CLIENT_ID = os.environ.get("COGNITO_CLIENT_ID", "3255n0uofh58rqqiqhodtqtdp7")
+ COGNITO_USER_POOL_ID = os.environ.get("COGNITO_USER_POOL_ID", "us-east-1_mNTHmkVBB")
+ COGNITO_CLIENT_ID = os.environ.get("COGNITO_CLIENT_ID", "7e77oj9t00qakod07crel9s82t")
  AWS_REGION = os.environ.get("AWS_REGION", "us-east-1")


@@ -50,7 +49,7 @@ class TinesightRegistrar(TinesightApiMixin):
  Basic login using Cognito IDP with the SRP flow. This method is required to be called
  prior to registering any devices.
  """
- self.auth: RequestsSrpAuth = RequestsSrpAuth(
+ self.auth = RequestsSrpAuth(
  username=username,
  password=password,
  user_pool_id=COGNITO_USER_POOL_ID,
@@ -69,8 +68,27 @@ class TinesightRegistrar(TinesightApiMixin):
  pk_contents, password=key_password.encode() if key_password else None
  )

+ def _is_authorized(self) -> bool:
+ if self.auth is None:
+ print("Need to call login prior to invoking any API methods")
+ return self.auth is not None
+
+ def unregister_device(self, device_id: str):
+ """
+ Unregisters a device (idempotent) from your account.
+
+ :param device_id: unique device identifier
+
+ :return: dict
+ """
+ if not self._is_authorized():
+ return None
+ target_url = self.public_ux_api_uri + "/unregister-device/v1"
+ response = requests.post(target_url, json={"device_id": device_id}, auth=self.auth)
+ return response.json()
+
  def register_device(
- self, device_id: str, pem_key_path: Path, key_password: str = None
+ self, device_id: str, pem_key_path: Path, key_password: str = None, renew: bool = False
  ) -> bytes | None:
  """
  Registers a uniquely identified device for your account by creating a certificate
@@ -87,11 +105,11 @@ class TinesightRegistrar(TinesightApiMixin):
  :param device_id: unique device identifier
  :param pem_key_path: path to your secret key
  :param key_password: str, default None - password to your secret key
+ :param renew: bool, default False - if True, will force a renewal of the certificate

- :return: certificate (str)
+ :return: certificate (bytes)
  """
- if self.auth is None:
- print("Need to call login prior to registering a device")
+ if not self._is_authorized():
  return None

  key = self._read_private_key(pem_key_path, key_password)
@@ -125,7 +143,7 @@ class TinesightRegistrar(TinesightApiMixin):
  )

  # request the signed certificate
- target_url = self.public_ux_api_uri + "/register-device/v1"
+ target_url = self.public_ux_api_uri + f"/register-device/v1?renew={int(renew)}"
  response = requests.post(
  target_url,
  data=csr.public_bytes(serialization.Encoding.PEM),
@@ -134,5 +152,10 @@ class TinesightRegistrar(TinesightApiMixin):
  if response.status_code == HTTPStatus.OK:
  json_response = response.json()
  return json_response["certificate"].encode("utf-8")
+ elif response.status_code == HTTPStatus.CONFLICT:
+ json_response = response.json()
+ print(json_response["message"])
+ return None
  else:
+ print("Unable to register device")
  return None
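The registrar changes add certificate renewal and device removal on top of the existing SRP login flow. A minimal sketch under stated assumptions: the no-argument constructor and the login(username, password) call do not appear in these hunks and are assumed here; credentials, device id, and paths are placeholders.

    from pathlib import Path

    from tinesight import TinesightRegistrar  # assumed re-export per the new __all__

    registrar = TinesightRegistrar()  # assumed no-argument constructor
    # SRP login must happen before any registrar call; method name/signature assumed.
    registrar.login("user@example.com", "placeholder-password")

    # Force re-issue of a certificate for an already registered device;
    # renew=True maps to the new ?renew=1 query parameter.
    cert = registrar.register_device("camera-01", Path("device.key"), renew=True)
    if cert is not None:
        Path("device.pem").write_bytes(cert)

    # Idempotent removal of the device from the account.
    print(registrar.unregister_device("camera-01"))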
@@ -1,36 +0,0 @@
- from collections.abc import Callable
- from functools import partial
- from pathlib import Path
-
- import requests
-
- from tinesight.api import TinesightApiMixin
-
-
- class TinesightClient(TinesightApiMixin):
- """
- Client for invoking the Tinesight API from a device.
-
- To use this class, a device must be registered with a signed certificate using the
- `TinesightRegistrar`.
-
- Examples:
- >>> result = TinesightClient(my_key_path, my_cert_path).classify(my_image_bytes)
- >>> print(result.json)
- >>> {'class': 'deer', 'probability': 0.98}
-
- """
-
- @property
- def _mtls_post(self) -> Callable:
- """Private wrapper for making invoking requests with a certa"""
- return partial(requests.post, cert=(self.cert_path, self.key_path))
-
- def __init__(self, x509_key_path: Path | str, x509_cert_path: Path | str):
- self.key_path: str = str(x509_key_path)
- self.cert_path: str = str(x509_cert_path)
-
- def classify(self, image_bytes: bytes) -> requests.Response:
- """Invokes the classification model for the specified image"""
- classification_url = self.tenant_base_api_uri + "/classify/v1"
- return self._mtls_post(classification_url, files={"file": image_bytes})