nedo-vision-worker 1.3.1-py3-none-any.whl → 1.3.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
--- nedo_vision_worker/__init__.py
+++ nedo_vision_worker/__init__.py
@@ -6,5 +6,5 @@ A library for running worker agents in the Nedo Vision platform.
 
  from .worker_service import WorkerService
 
- __version__ = "1.3.1"
+ __version__ = "1.3.5"
  __all__ = ["WorkerService"]
--- nedo_vision_worker/worker/DataSenderWorker.py
+++ nedo_vision_worker/worker/DataSenderWorker.py
@@ -153,7 +153,7 @@ class DataSenderWorker:
      def _calculate_sleep_interval(self, ppe_pending: int, violation_pending: int) -> float:
          """
          Calculate optimal sleep interval based on pending data.
-         Conservative approach to prevent server overload.
+         Conservative approach to prevent server overload and storage rate limits.
 
          Args:
              ppe_pending: Number of pending PPE detections
@@ -169,11 +169,13 @@ class DataSenderWorker:
          elif total_pending < 50:
              return self.send_interval
          elif total_pending < 200:
-             return 3.0
+             return 4.0
          elif total_pending < 500:
-             return 2.0
+             return 3.0
+         elif total_pending < 1000:
+             return 2.5
          else:
-             return 1.5
+             return 2.0
 
      def _run_worker_source_updater(self):
          """Dedicated loop for updating worker sources at a different interval."""
--- nedo_vision_worker/worker/PPEDetectionManager.py
+++ nedo_vision_worker/worker/PPEDetectionManager.py
@@ -1,4 +1,5 @@
  import logging
+ import os
  import threading
  import time
  from ..services.PPEDetectionClient import PPEDetectionClient
@@ -50,41 +51,91 @@ class PPEDetectionManager:
 
          logger.info("📡 [APP] PPE detection monitoring started.")
 
-     def _calculate_batch_size(self, pending_count: int) -> int:
+     def _calculate_fetch_size(self, pending_count: int) -> int:
          """
-         Calculates optimal batch size based on pending items.
-         Limited to 50 to stay within 50MB gRPC message size limit.
+         Dynamically calculates how many records to fetch based on pending count.
+         Reduced to prevent storage rate limits (each detection = 2 images = 2 dir checks).
 
          Args:
              pending_count (int): Number of pending detections
 
          Returns:
-             int: Optimal batch size (max 50)
+             int: Number of records to fetch
          """
-         if pending_count < 20:
-             return 10
-         elif pending_count < 100:
-             return 20
-         elif pending_count < 500:
-             return 30
+         if pending_count <= 50:
+             return min(pending_count, 5)
+         elif pending_count <= 200:
+             return min(pending_count, 10)
+         elif pending_count <= 1000:
+             return min(pending_count, 15)
          else:
-             return 50
+             return min(pending_count, 20)
+
+     def _calculate_batch_by_size(self, all_detections: list, max_size_mb: int = 40) -> list:
+         """
+         Calculates batch based on actual image file sizes to stay within gRPC limit.
+
+         Args:
+             all_detections (list): All pending detections
+             max_size_mb (int): Maximum batch size in MB (default 40MB for 50MB limit with margin)
+
+         Returns:
+             list: Detections that fit within size limit
+         """
+         import os
+
+         batch = []
+         total_size = 0
+         max_size_bytes = max_size_mb * 1024 * 1024
+
+         for detection in all_detections:
+             try:
+                 image_size = os.path.getsize(detection['image']) if os.path.exists(detection['image']) else 0
+                 tile_size = os.path.getsize(detection['image_tile']) if os.path.exists(detection['image_tile']) else 0
+                 detection_size = image_size + tile_size
+
+                 if total_size + detection_size > max_size_bytes:
+                     if batch:
+                         break
+                     else:
+                         logger.warning(f"⚠️ Single detection exceeds {max_size_mb}MB, skipping")
+                         continue
+
+                 batch.append(detection)
+                 total_size += detection_size
+
+             except Exception as e:
+                 logger.error(f"❌ Error checking file size: {e}")
+                 continue
+
+         return batch
 
      def send_ppe_detection_batch(self):
-         """Sends a batch of collected PPE detection data to the server with dynamic batch sizing."""
+         """Sends a batch of collected PPE detection data to the server with dynamic size-based batching."""
          try:
              pending_count = self.ppe_detection_repo.get_total_pending_count()
 
              if pending_count == 0:
                  return
 
-             batch_size = self._calculate_batch_size(pending_count)
-             self.ppe_detection_data = self.ppe_detection_repo.get_latest_detections(batch_size)
+             fetch_size = self._calculate_fetch_size(pending_count)
+             all_detections = self.ppe_detection_repo.get_latest_detections(fetch_size)
+
+             if not all_detections:
+                 return
+
+             self.ppe_detection_data = self._calculate_batch_by_size(all_detections)
 
              if not self.ppe_detection_data:
+                 logger.warning("⚠️ [APP] No valid detections within size limit")
                  return
 
-             logger.info(f"📤 [APP] Sending {len(self.ppe_detection_data)} PPE detections ({pending_count} pending)")
+             batch_size_mb = sum(
+                 os.path.getsize(d['image']) + os.path.getsize(d['image_tile'])
+                 for d in self.ppe_detection_data if os.path.exists(d['image']) and os.path.exists(d['image_tile'])
+             ) / (1024 * 1024)
+
+             logger.info(f"📤 [APP] Sending {len(self.ppe_detection_data)} PPE detections (~{batch_size_mb:.1f}MB, {pending_count} pending)")
 
              response = self.ppe_detection_client.send_upsert_batch(
                  worker_id=self.worker_id,
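The 40MB default in _calculate_batch_by_size leaves headroom under the 50MB gRPC message cap mentioned in the docstrings. For context, such a cap is ordinarily configured through gRPC channel options; whether PPEDetectionClient sets it exactly this way is not shown in this diff, so the snippet below is only an illustration and the target address is a placeholder.

    import grpc

    # Typical way to allow ~50MB messages on a gRPC channel (illustration only).
    MAX_MESSAGE_MB = 50
    options = [
        ("grpc.max_send_message_length", MAX_MESSAGE_MB * 1024 * 1024),
        ("grpc.max_receive_message_length", MAX_MESSAGE_MB * 1024 * 1024),
    ]
    channel = grpc.insecure_channel("localhost:50051", options=options)  # placeholder target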
--- nedo_vision_worker/worker/RestrictedAreaManager.py
+++ nedo_vision_worker/worker/RestrictedAreaManager.py
@@ -1,4 +1,5 @@
  import logging
+ import os
  import threading
  import time
 
@@ -50,41 +51,89 @@ class RestrictedAreaManager:
 
          logger.info("📡 [APP] Restricted area violation monitoring started.")
 
-     def _calculate_batch_size(self, pending_count: int) -> int:
+     def _calculate_fetch_size(self, pending_count: int) -> int:
          """
-         Calculates optimal batch size based on pending items.
-         Limited to 50 to stay within 50MB gRPC message size limit.
+         Dynamically calculates how many records to fetch based on pending count.
+         Reduced to prevent storage rate limits (each violation = 2 images = 2 dir checks).
 
          Args:
              pending_count (int): Number of pending violations
 
          Returns:
-             int: Optimal batch size (max 50)
+             int: Number of records to fetch
          """
-         if pending_count < 20:
-             return 10
-         elif pending_count < 100:
-             return 20
-         elif pending_count < 500:
-             return 30
+         if pending_count <= 50:
+             return min(pending_count, 5)
+         elif pending_count <= 200:
+             return min(pending_count, 10)
+         elif pending_count <= 1000:
+             return min(pending_count, 15)
          else:
-             return 50
+             return min(pending_count, 20)
+
+     def _calculate_batch_by_size(self, all_violations: list, max_size_mb: int = 40) -> list:
+         """
+         Calculates batch based on actual image file sizes to stay within gRPC limit.
+
+         Args:
+             all_violations (list): All pending violations
+             max_size_mb (int): Maximum batch size in MB (default 40MB for 50MB limit with margin)
+
+         Returns:
+             list: Violations that fit within size limit
+         """
+         batch = []
+         total_size = 0
+         max_size_bytes = max_size_mb * 1024 * 1024
+
+         for violation in all_violations:
+             try:
+                 image_size = os.path.getsize(violation['image']) if os.path.exists(violation['image']) else 0
+                 tile_size = os.path.getsize(violation['image_tile']) if os.path.exists(violation['image_tile']) else 0
+                 violation_size = image_size + tile_size
+
+                 if total_size + violation_size > max_size_bytes:
+                     if batch:
+                         break
+                     else:
+                         logger.warning(f"⚠️ Single violation exceeds {max_size_mb}MB, skipping")
+                         continue
+
+                 batch.append(violation)
+                 total_size += violation_size
+
+             except Exception as e:
+                 logger.error(f"❌ Error checking file size: {e}")
+                 continue
+
+         return batch
 
      def send_violation_batch(self):
-         """Sends a batch of collected violation data to the server with dynamic batch sizing."""
+         """Sends a batch of collected violation data to the server with dynamic size-based batching."""
          try:
              pending_count = self.repo.get_total_pending_count()
 
              if pending_count == 0:
                  return
 
-             batch_size = self._calculate_batch_size(pending_count)
-             self.violations_data = self.repo.get_latest_violations(batch_size)
+             fetch_size = self._calculate_fetch_size(pending_count)
+             all_violations = self.repo.get_latest_violations(fetch_size)
+
+             if not all_violations:
+                 return
+
+             self.violations_data = self._calculate_batch_by_size(all_violations)
 
              if not self.violations_data:
+                 logger.warning("⚠️ [APP] No valid violations within size limit")
                  return
 
-             logger.info(f"📤 [APP] Sending {len(self.violations_data)} violations ({pending_count} pending)")
+             batch_size_mb = sum(
+                 os.path.getsize(v['image']) + os.path.getsize(v['image_tile'])
+                 for v in self.violations_data if os.path.exists(v['image']) and os.path.exists(v['image_tile'])
+             ) / (1024 * 1024)
+
+             logger.info(f"📤 [APP] Sending {len(self.violations_data)} violations (~{batch_size_mb:.1f}MB, {pending_count} pending)")
 
              response = self.client.send_upsert_batch(
                  worker_id=self.worker_id,
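RestrictedAreaManager now mirrors PPEDetectionManager: fetch a small number of rows, then greedily pack them until the size cap would be exceeded. Stripped of file I/O and logging, the shared packing rule reduces to the standalone sketch below (an illustration over plain byte counts, not code from the package).

    # Greedy size-capped packing shared by both managers, reduced to byte counts.
    def pack_by_size(sizes_bytes, max_size_mb=40):
        limit = max_size_mb * 1024 * 1024
        batch, total = [], 0
        for size in sizes_bytes:
            if total + size > limit:
                if batch:
                    break     # stop at the first item that would overflow a non-empty batch
                continue      # a single oversized item is skipped entirely
            batch.append(size)
            total += size
        return batch

    # Three 15MB payloads against a 40MB cap: only the first two fit.
    print(len(pack_by_size([15 * 1024 * 1024] * 3)))  # -> 2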
--- nedo_vision_worker-1.3.1.dist-info/METADATA
+++ nedo_vision_worker-1.3.5.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: nedo-vision-worker
- Version: 1.3.1
+ Version: 1.3.5
  Summary: Nedo Vision Worker Service Library for AI Vision Processing
  Author-email: Willy Achmat Fauzi <willy.achmat@gmail.com>
  Maintainer-email: Willy Achmat Fauzi <willy.achmat@gmail.com>
--- nedo_vision_worker-1.3.1.dist-info/RECORD
+++ nedo_vision_worker-1.3.5.dist-info/RECORD
@@ -1,4 +1,4 @@
- nedo_vision_worker/__init__.py,sha256=ymUax1kADKc7nS9WyGBL5U3KmJaYaJsSNFXbaxeB5iY,203
+ nedo_vision_worker/__init__.py,sha256=5XGhmiZwTkqHlXOZd-dVjqCEOx21tOtxk24cH0N_kqM,203
  nedo_vision_worker/cli.py,sha256=ddWspJmSgVkcUYvRdkvTtMNuMTDvNCqLLuMVU9KE3Ik,7457
  nedo_vision_worker/doctor.py,sha256=wNkpe8gLVd76Y_ViyK2h1ZFdqeSl37MnzZN5frWKu30,48410
  nedo_vision_worker/worker_service.py,sha256=9zz8hKwDwqwpfS0KPQfftGJtRci0uj_wiwcr_TGf-E0,11039
@@ -80,22 +80,22 @@ nedo_vision_worker/util/SystemMonitor.py,sha256=2kkqj9mOlywAS2fHdN1TaIXSXvCApcIH
  nedo_vision_worker/util/VideoProbeUtil.py,sha256=cF-vJ7hIDlXfEJby2a0s9tqwkPGVz_6B3Vv4D5pMmIw,12876
  nedo_vision_worker/util/__init__.py,sha256=Nqnn8clbgv-5l0PgxcTOldg8mkMKrFn4TvPL-rYUUGg,1
  nedo_vision_worker/worker/CoreActionWorker.py,sha256=lb7zPY3yui6I3F4rX4Ii7JwpWZahLEO72rh3iWOgFmg,5441
- nedo_vision_worker/worker/DataSenderWorker.py,sha256=BNA5mL2guBVp0O5MKULEWd78dY32YjiyWZX7ECFjVFc,9089
+ nedo_vision_worker/worker/DataSenderWorker.py,sha256=57bhNhpbherg_gCuUNnKt_1JwSQk1ukbx8mFWNsmrJ4,9171
  nedo_vision_worker/worker/DataSyncWorker.py,sha256=LmDPt2J1frmXwuR46L6b0MjlFOHfgG-4_0MGQa78zF4,6288
  nedo_vision_worker/worker/DatasetFrameSender.py,sha256=1SFYj8LJFNi-anBTapsbq8U_NGMM7mnoMKg9NeFAHys,8087
  nedo_vision_worker/worker/DatasetFrameWorker.py,sha256=Hh_wZuMjwovxsEKFqXSuTRin9eYRBZCbcFKm3CKLMbE,19335
- nedo_vision_worker/worker/PPEDetectionManager.py,sha256=FZbPuxY5dc4e919DJHXQSJHPF3btZMrC1xwD0jYYypU,4588
+ nedo_vision_worker/worker/PPEDetectionManager.py,sha256=lb46NfhrZ-g_pxhurZl2uozPnWneZzptYOaIdzoFQxM,6775
  nedo_vision_worker/worker/PipelineActionWorker.py,sha256=xgvryjKtEsMj4BKqWzDIaK_lFny-DfMCj5Y2DxHnWww,5651
  nedo_vision_worker/worker/PipelineImageWorker.py,sha256=J8VBUG0cwcH3qOJp2zTl30B-XhmPFyvJLjxitKJYq0E,5642
  nedo_vision_worker/worker/PipelinePreviewWorker.py,sha256=owFiBbktcOZkdImQeykZSeBIR2-mpt6HNkmYIkLRKzE,6397
  nedo_vision_worker/worker/RabbitMQListener.py,sha256=9gR49MDplgpyb-D5HOH0K77-DJQFvhS2E7biL92SjSU,6950
- nedo_vision_worker/worker/RestrictedAreaManager.py,sha256=IWuCqpE-IPSf5Mdo5NUnbRUnY9pSH2471EB2eS0XRWs,4482
+ nedo_vision_worker/worker/RestrictedAreaManager.py,sha256=LAIrOhzGlR0gQNuTJZx6zvUmcg5WwGKehthONCMjbCQ,6639
  nedo_vision_worker/worker/SystemUsageManager.py,sha256=mkh4sT-HkIEY1CJHMEG6LP9ATu39YXvLRLyf995OkoQ,5315
  nedo_vision_worker/worker/VideoStreamWorker.py,sha256=5n6v1PNO7IB-jj_McALLkUP-cBjJoIEw4UiSAs3vTb0,7606
  nedo_vision_worker/worker/WorkerManager.py,sha256=2bxXi19fp3p1qjYBStYRdVVgko8dnevXx1_M_sqH5og,5521
  nedo_vision_worker/worker/__init__.py,sha256=Nqnn8clbgv-5l0PgxcTOldg8mkMKrFn4TvPL-rYUUGg,1
- nedo_vision_worker-1.3.1.dist-info/METADATA,sha256=eZ2JS5SEBuZrrvpkJDNxc4svJHCVLzf4beEcTdRO4iA,14728
- nedo_vision_worker-1.3.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- nedo_vision_worker-1.3.1.dist-info/entry_points.txt,sha256=LrglS-8nCi8C_PL_pa6uxdgCe879hBETHDVXAckvs-8,60
- nedo_vision_worker-1.3.1.dist-info/top_level.txt,sha256=vgilhlkyD34YsEKkaBabmhIpcKSvF3XpzD2By68L-XI,19
- nedo_vision_worker-1.3.1.dist-info/RECORD,,
+ nedo_vision_worker-1.3.5.dist-info/METADATA,sha256=S6qk8Fmb9pN2zpeAZ15Eqa8uxv85fSUIL6tWrV7S7OA,14728
+ nedo_vision_worker-1.3.5.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ nedo_vision_worker-1.3.5.dist-info/entry_points.txt,sha256=LrglS-8nCi8C_PL_pa6uxdgCe879hBETHDVXAckvs-8,60
+ nedo_vision_worker-1.3.5.dist-info/top_level.txt,sha256=vgilhlkyD34YsEKkaBabmhIpcKSvF3XpzD2By68L-XI,19
+ nedo_vision_worker-1.3.5.dist-info/RECORD,,
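For reference, the hash column in RECORD is the urlsafe-base64 SHA-256 of each installed file (PEP 376/427), which is why nedo_vision_worker/__init__.py gets a new digest while its size stays 203 bytes: only the version digits changed. A minimal sketch of how such an entry is derived:

    import base64
    import hashlib

    # RECORD lines have the form "path,sha256=<urlsafe-b64 digest, no padding>,<size in bytes>".
    def record_entry(path: str) -> str:
        data = open(path, "rb").read()
        digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=")
        return f"{path},sha256={digest.decode()},{len(data)}"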