nedo-vision-worker-core 0.4.0-py3-none-any.whl → 0.4.3-py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries, and is provided for informational purposes only.

Note: this release of nedo-vision-worker-core has been flagged as potentially problematic.

@@ -7,10 +7,10 @@ A library for running AI vision processing and detection in the Nedo Vision plat
 from .core_service import CoreService
 from .callbacks import DetectionType, CallbackTrigger, DetectionData, IntervalMetadata

-__version__ = "0.4.0"
+__version__ = "0.4.3"
 __all__ = [
     "CoreService",
-    "DetectionType",
+    "DetectionType",
     "CallbackTrigger",
     "DetectionData",
     "IntervalMetadata",
@@ -31,6 +31,11 @@ class PipelineManager:
         self._stop_lock = threading.Lock()
         self.on_pipeline_stopped = on_pipeline_stopped

+        # Stagger pipeline startup to reduce CPU spikes
+        self._last_pipeline_start = 0
+        self._pipeline_start_delay = 1.0  # 1 second between pipeline starts
+        self._start_lock = threading.Lock()
+
         logging.info(f"🚀 PipelineManager initialized with {max_workers} worker threads")

     def start_pipeline(self, pipeline, detector):
@@ -46,6 +51,15 @@ class PipelineManager:
             logging.warning(f"⚠️ Pipeline {pipeline_id} is already running.")
             return

+        # Stagger pipeline starts to reduce CPU spikes
+        with self._start_lock:
+            time_since_last_start = time.time() - self._last_pipeline_start
+            if time_since_last_start < self._pipeline_start_delay:
+                delay = self._pipeline_start_delay - time_since_last_start
+                logging.info(f"⏳ Staggering pipeline {pipeline_id} start by {delay:.2f}s to reduce CPU spike")
+                time.sleep(delay)
+            self._last_pipeline_start = time.time()
+
         logging.info(f"🚀 Starting Pipeline processing for pipeline: {pipeline_id} | Source: {worker_source_id} ({pipeline.name})")

         # Acquire video stream
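The staggering above amounts to a simple rate gate: a shared lock plus a timestamp of the most recent start, so concurrent callers queue up and each sleeps out the remainder of the delay. A minimal standalone sketch of the same pattern (the `StaggeredStarter` class and all names here are illustrative, not from the package):

```python
import threading
import time

class StaggeredStarter:
    """Minimal sketch of the start-staggering gate (illustrative only)."""

    def __init__(self, min_delay: float = 1.0):
        self._min_delay = min_delay
        self._last_start = 0.0
        self._lock = threading.Lock()

    def wait_for_slot(self) -> None:
        # Holding the lock while sleeping intentionally serializes starts,
        # so N simultaneous callers spread out over roughly N * min_delay.
        with self._lock:
            elapsed = time.time() - self._last_start
            if elapsed < self._min_delay:
                time.sleep(self._min_delay - elapsed)
            self._last_start = time.time()

if __name__ == "__main__":
    gate = StaggeredStarter(min_delay=1.0)

    def start_worker(name: str) -> None:
        gate.wait_for_slot()
        print(f"{name} started at {time.monotonic():.2f}")

    threads = [threading.Thread(target=start_worker, args=(f"pipeline-{i}",))
               for i in range(3)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
```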
@@ -47,6 +47,10 @@ class PipelineProcessor:
         self.last_preview_check_time = 0
         self.preview_check_interval = 10.0  # Check every 10 seconds (reduced from 5s to save CPU)
         self.pipeline_repo = WorkerSourcePipelineRepository()
+
+        # RTMP frame rate limiting to reduce CPU
+        self.last_rtmp_frame_time = 0
+        self.rtmp_frame_interval = 1.0 / 25.0  # 25 FPS for RTMP (matching render FPS)

         self.detection_processor_codes = [
             PPEDetectionProcessor.code,
@@ -199,9 +203,17 @@ class PipelineProcessor:
                 self._check_and_update_rtmp_streaming()
                 last_preview_check = loop_start

+            # Only draw frames when actually needed (RTMP active or debug)
             should_draw = self.rtmp_streaming_active or self.debug_flag

-            if should_draw:
+            # For RTMP, also check if it's time to send a frame (rate limiting)
+            should_draw_for_rtmp = (
+                self.rtmp_streaming_active and
+                self.rtmp_streamer is not None and
+                (loop_start - self.last_rtmp_frame_time >= self.rtmp_frame_interval)
+            )
+
+            if should_draw_for_rtmp or self.debug_flag:
                 try:
                     frame_to_draw = frame.copy()
                     self.frame_drawer.draw_polygons(frame_to_draw)
@@ -215,7 +227,7 @@ class PipelineProcessor:
                     logging.error(f"❌ Draw failed, using raw frame: {e}")
                     drawn_frame = frame
             else:
-                drawn_frame = frame
+                drawn_frame = None  # Don't waste CPU drawing if not needed

             if self.debug_flag:
                 tracked_objects_render = self._process_frame(frame)
@@ -235,13 +247,16 @@ class PipelineProcessor:
                 try:
                     self.rtmp_streamer = RTMPStreamer(self.pipeline_id)
                     logging.info(f"🎬 RTMP streamer initialized for pipeline {pipeline_id} (preview requested)")
+                    self.last_rtmp_frame_time = 0  # Reset frame time on new stream
                 except Exception as e:
                     logging.error(f"❌ Failed to initialize RTMP streamer for pipeline {pipeline_id}: {e}")
                     self.rtmp_streamer = None

-            if self.rtmp_streamer:
+            if self.rtmp_streamer and drawn_frame is not None:
+                # Frame already rate-limited by drawing logic above
                 try:
                     self.rtmp_streamer.push_frame(drawn_frame)
+                    self.last_rtmp_frame_time = loop_start
                 except Exception as e:
                     logging.error(f"❌ RTMP push error for pipeline {pipeline_id}: {e}")
                     if "initialization_failed" in str(e).lower():
@@ -259,9 +274,10 @@ class PipelineProcessor:
                 finally:
                     self.rtmp_streamer = None

-            # Simplified queue feeding - avoid expensive try-except
-            if not self.frame_queue.full():
-                self.frame_queue.put_nowait(frame)
+            # Only feed frames to detection queue if detection processor is active
+            if self.detection_processor is not None:
+                if not self.frame_queue.full():
+                    self.frame_queue.put_nowait(frame)

             loop_elapsed = time.time() - loop_start
             sleep_time = max(0.001, target_frame_time - loop_elapsed)
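One caveat on the feeding pattern: checking `full()` before `put_nowait()` can race when multiple producers share the queue; with the single capture thread here it is harmless, but a defensive variant catches `queue.Full` instead. A hedged sketch of that variant (illustrative, not the package's code):

```python
import queue

def feed_frame(frame_queue: "queue.Queue", frame, has_detector: bool) -> bool:
    """Drop the frame unless a detector is attached and the queue has room."""
    if not has_detector:
        return False  # no consumer: skip queue work entirely
    try:
        frame_queue.put_nowait(frame)  # never blocks the capture loop
        return True
    except queue.Full:
        return False  # back-pressure: drop rather than stall

if __name__ == "__main__":
    q = queue.Queue(maxsize=2)
    for i in range(4):
        print(i, feed_frame(q, f"frame-{i}", has_detector=True))
```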
@@ -307,7 +323,13 @@ class PipelineProcessor:

         while self.running:
             try:
-                frame = self.frame_queue.get(block=True, timeout=1)
+                # Calculate how long to wait for next detection
+                current_time = time.time()
+                time_since_last_detection = current_time - last_detection_time
+                time_until_next_detection = max(0.1, self.detection_interval - time_since_last_detection)
+
+                # Wait for frame with timeout aligned to detection interval
+                frame = self.frame_queue.get(block=True, timeout=time_until_next_detection)

                 # Check for poison pill (None = stop signal)
                 if frame is None:
@@ -332,9 +354,14 @@ class PipelineProcessor:
                 except queue.Empty:
                     pass

-                # Respect detection interval
+                # Respect detection interval - skip if too soon
                 if (current_time - last_detection_time) < self.detection_interval:
+                    # Sleep the remaining time instead of busy-waiting
+                    remaining_time = self.detection_interval - (current_time - last_detection_time)
+                    if remaining_time > 0.01:
+                        time.sleep(remaining_time)
                     continue
+
                 last_detection_time = current_time

                 if self.detection_processor is None or frame is None or frame.size == 0:
@@ -363,6 +390,7 @@ class PipelineProcessor:
                 )

             except queue.Empty:
+                # Timeout occurred - this is normal, just continue to next iteration
                 pass
             except Exception as e:
                 logging.error(f"❌ Error in detection thread for pipeline {pipeline_id}: {e}", exc_info=True)
@@ -5,6 +5,7 @@ import threading
 from typing import Dict, Set, Optional
 from ..repositories.WorkerSourcePipelineDebugRepository import WorkerSourcePipelineDebugRepository
 from ..repositories.WorkerSourcePipelineRepository import WorkerSourcePipelineRepository
+from ..repositories.WorkerSourceRepository import WorkerSourceRepository
 from .PipelineManager import PipelineManager
 from .ModelManager import ModelManager
 from ..streams.VideoStreamManager import VideoStreamManager
@@ -19,6 +20,7 @@ class PipelineSyncThread(threading.Thread):
         self.polling_interval = polling_interval
         self.pipeline_repo = WorkerSourcePipelineRepository()
         self.debug_repo = WorkerSourcePipelineDebugRepository()
+        self.source_repo = WorkerSourceRepository()
         self.model_manager = ModelManager()
         self.running = True
         self.pipeline_manager = PipelineManager(video_manager, self.on_pipeline_stopped, max_workers)
@@ -82,6 +84,11 @@ class PipelineSyncThread(threading.Thread):
                 pipeline.pipeline_status_code = 'run'

             if pipeline.pipeline_status_code == 'run':
+                # Check if source is connected before starting pipeline
+                if not self.source_repo.is_source_connected(pipeline.worker_source_id):
+                    logging.warning(f"⚠️ Skipping pipeline {pid} ({pipeline.name}): Source {pipeline.worker_source_id} is disconnected")
+                    continue
+
                 detector = self.model_manager.get_detector(pipeline.ai_model_id)

                 if not detector and pipeline.ai_model_id:
@@ -146,6 +153,12 @@ class PipelineSyncThread(threading.Thread):
             if requires_restart:
                 logging.info(f"🔄 Restarting pipeline due to significant changes: {pid}")
                 self.pipeline_manager.stop_pipeline(pid)
+
+                # Check if source is connected before restarting
+                if not self.source_repo.is_source_connected(db_pipeline.worker_source_id):
+                    logging.warning(f"⚠️ Cannot restart pipeline {pid}: Source {db_pipeline.worker_source_id} is disconnected")
+                    return
+
                 self.pipeline_manager.start_pipeline(db_pipeline, db_detector)
             else:
                 # Update config for minor changes that don't require restart
@@ -19,3 +19,38 @@ class WorkerSourceRepository(BaseRepository):
         for source in sources:
             session.expunge(source)
         return sources
+
+    def get_worker_source(self, source_id: str):
+        """
+        Fetch a single worker source by ID.
+
+        Args:
+            source_id (str): The worker source ID
+
+        Returns:
+            WorkerSourceEntity: The worker source entity or None if not found
+        """
+        with self._get_session() as session:
+            session.expire_all()
+            source = session.query(WorkerSourceEntity).filter(
+                WorkerSourceEntity.id == source_id
+            ).first()
+            if source:
+                session.expunge(source)
+            return source
+
+    def is_source_connected(self, source_id: str) -> bool:
+        """
+        Check if a worker source is connected.
+
+        Args:
+            source_id (str): The worker source ID
+
+        Returns:
+            bool: True if source is connected, False otherwise
+        """
+        source = self.get_worker_source(source_id)
+        if not source:
+            return False
+
+        return source.status_code == "connected" if source.status_code else False
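The new repository methods follow SQLAlchemy's query-then-detach pattern: `expire_all()` forces a fresh read of `status_code`, and `expunge()` detaches the row so it remains usable after the session closes. A self-contained sketch of that pattern against an in-memory SQLite database (the `Source` model here is illustrative, not the package's real entity):

```python
from sqlalchemy import Column, String, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class Source(Base):
    __tablename__ = "worker_source"
    id = Column(String, primary_key=True)
    status_code = Column(String)

engine = create_engine("sqlite:///:memory:")
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add(Source(id="cam-1", status_code="connected"))
    session.commit()

with Session(engine) as session:
    session.expire_all()  # drop cached state so status_code is read fresh
    source = session.query(Source).filter(Source.id == "cam-1").first()
    if source:
        session.expunge(source)  # detach so the row survives session close

print(source.status_code == "connected")  # True, even outside the session
```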
@@ -25,7 +25,7 @@ class RTMPStreamer:
     # Class-level lock to stagger stream initialization across all instances
     _initialization_lock = threading.Lock()
     _last_initialization_time = 0
-    _min_initialization_delay = 0.5  # 500ms between stream starts
+    _min_initialization_delay = 1.5  # 1.5 seconds between stream starts (increased from 0.5s)

     def __init__(self, pipeline_id: str, fps: int = 25, bitrate: str = "1500k"):
         self.pipeline_id = pipeline_id
@@ -307,6 +307,7 @@ class RTMPStreamer:
             if not self.is_active():
                 raise BrokenPipeError("FFmpeg process is not active")
             self._ffmpeg_process.stdin.write(frame.tobytes())
+            self._ffmpeg_process.stdin.flush()  # Ensure data is sent immediately
         except (BrokenPipeError, OSError) as e:

             # Check if this failure was from a HW encoder
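For context, `push_frame` writes raw frames into FFmpeg's stdin; without the flush, Python's buffered pipe can hold frames back and add latency. A self-contained sketch of the same raw-video pipe, writing one second of black video to a local MP4 instead of an RTMP URL (requires ffmpeg and numpy; all values are illustrative):

```python
import subprocess
import numpy as np

w, h, fps = 640, 480, 25
proc = subprocess.Popen(
    ["ffmpeg", "-y",
     "-f", "rawvideo", "-pix_fmt", "bgr24", "-s", f"{w}x{h}",
     "-framerate", str(fps), "-i", "-",          # raw frames from stdin
     "-c:v", "libx264", "-preset", "ultrafast", "-tune", "zerolatency",
     "-pix_fmt", "yuv420p", "out.mp4"],
    stdin=subprocess.PIPE,
)
frame = np.zeros((h, w, 3), dtype=np.uint8)      # one black BGR frame
for _ in range(fps):                             # one second of video
    proc.stdin.write(frame.tobytes())
    proc.stdin.flush()                           # hand the frame to FFmpeg now
proc.stdin.close()
proc.wait()
```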
@@ -356,23 +357,20 @@ class RTMPStreamer:
             '-framerate', str(self.fps), '-i', '-',
         ]

+        # Add encoder and encoder-specific parameters
         cmd.extend(encoder_args)

+        # Common video parameters
         cmd.extend([
-            '-profile:v', 'main', '-pix_fmt', 'yuv420p',
+            '-pix_fmt', 'yuv420p',
             '-b:v', f"{self.bitrate}k", '-maxrate', f"{self.bitrate}k", '-bufsize', f"{self.bitrate*2}k",
             '-g', str(self.fps * 2), '-keyint_min', str(self.fps),
-            '-force_key_frames', 'expr:gte(t,n_forced*1)', '-an',
-            '-flvflags', 'no_duration_filesize', '-f', 'flv', self.rtmp_url,
+            '-force_key_frames', 'expr:gte(t,n_forced*1)',
+            '-an',  # No audio
+            '-flvflags', 'no_duration_filesize',
+            '-f', 'flv', self.rtmp_url,
         ])

-        if encoder_name == "libx264":
-            cmd.extend([
-                "-preset", "ultrafast",
-                "-tune", "zerolatency",
-                "-x264-params", "open_gop=0:aud=1:repeat-headers=1:nal-hrd=cbr",
-            ])
-
         return cmd, encoder_name

     def _select_ffmpeg_encoder(self, force_cpu: bool = False) -> Tuple[List[str], str]:
@@ -381,26 +379,74 @@
         Will force CPU if force_cpu is True.
         """
         if force_cpu:
-            return ["-c:v", "libx264"], "libx264"
+            return [
+                "-c:v", "libx264",
+                "-preset", "ultrafast",
+                "-tune", "zerolatency",
+                "-profile:v", "main",
+            ], "libx264"

         force_encoder = os.environ.get("RTMP_ENCODER", "").lower()

         if force_encoder == "cpu" or force_encoder == "libx264":
-            return ["-c:v", "libx264"], "libx264"
+            return [
+                "-c:v", "libx264",
+                "-preset", "ultrafast",
+                "-tune", "zerolatency",
+                "-profile:v", "main",
+            ], "libx264"
         elif force_encoder == "nvenc":
-            return ["-c:v", "h264_nvenc", "-preset", "llhp"], "h264_nvenc"
+            return [
+                "-c:v", "h264_nvenc",
+                "-preset", "p1",  # p1 = fastest, p7 = slowest
+                "-tune", "ull",  # ultra-low latency
+                "-rc:v", "cbr",  # constant bitrate for streaming
+                "-rc-lookahead", "0",  # disable lookahead for lower latency
+                "-delay", "0",  # zero delay
+                "-zerolatency", "1",  # enable zero latency mode
+                "-profile:v", "main",
+                "-gpu", "0",  # Use first GPU
+            ], "h264_nvenc"

         if self._platform.is_jetson():
-            # Jetson-specific encoder
-            return ["-c:v", "h264_omx"], "h264_omx"
+            # Jetson-specific encoder with optimizations
+            return [
+                "-c:v", "h264_nvenc",
+                "-preset", "p1",
+                "-tune", "ull",
+                "-rc:v", "cbr",
+                "-rc-lookahead", "0",
+                "-delay", "0",
+                "-zerolatency", "1",
+                "-profile:v", "main",
+            ], "h264_nvenc"

         if sys.platform == "darwin":
-            return ["-c:v", "h264_videotoolbox"], "h264_videotoolbox"
+            return [
+                "-c:v", "h264_videotoolbox",
+                "-profile:v", "main",
+                "-realtime", "1",
+            ], "h264_videotoolbox"

         has_nvidia = (os.environ.get("NVIDIA_VISIBLE_DEVICES") is not None or
                       os.path.exists("/proc/driver/nvidia/version"))

         if has_nvidia:
-            return ["-c:v", "h264_nvenc", "-preset", "llhp"], "h264_nvenc"
+            return [
+                "-c:v", "h264_nvenc",
+                "-preset", "p1",  # p1 = fastest preset
+                "-tune", "ull",  # ultra-low latency
+                "-rc:v", "cbr",  # constant bitrate
+                "-rc-lookahead", "0",  # disable lookahead
+                "-delay", "0",  # zero delay
+                "-zerolatency", "1",  # zero latency mode
+                "-profile:v", "main",
+                "-gpu", "0",  # Use first GPU
+            ], "h264_nvenc"

-        return ["-c:v", "libx264"], "libx264"
+        return [
+            "-c:v", "libx264",
+            "-preset", "ultrafast",
+            "-tune", "zerolatency",
+            "-profile:v", "main",
+        ], "libx264"
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: nedo-vision-worker-core
-Version: 0.4.0
+Version: 0.4.3
 Summary: Nedo Vision Worker Core Library for AI Vision Processing
 Author-email: Willy Achmat Fauzi <willy.achmat@gmail.com>
 Maintainer-email: Willy Achmat Fauzi <willy.achmat@gmail.com>
@@ -1,4 +1,4 @@
-nedo_vision_worker_core/__init__.py,sha256=_NVuaujglbPX63LBohmNR6a1n7ctmMl97EsX9LBIVHA,1924
+nedo_vision_worker_core/__init__.py,sha256=65HOyr4VnCAkUlmbf0yGE_zIL4hLNL1IOY8zh_rnl8E,1923
 nedo_vision_worker_core/cli.py,sha256=8YuKWsIgICUYXE_QtwyU3WzGhVjTWiAo5uzpFOmjNc8,5766
 nedo_vision_worker_core/core_service.py,sha256=q8-GuGW_l5l6wTWQDqc7BDdhM7zKC-mMLZ5wIHu9xV0,11628
 nedo_vision_worker_core/doctor.py,sha256=K_-hVV2-mdEefZ4Cfu5hMCiOxBiI1aXY8VtkkpK80Lc,10651
@@ -59,10 +59,10 @@ nedo_vision_worker_core/models/worker_source_pipeline_debug.py,sha256=6S7TkN37Fr
 nedo_vision_worker_core/models/worker_source_pipeline_detection.py,sha256=p6CJsiVCKprTYrNxJsiTB8njXdHkjZKVEyBceRVE6fY,560
 nedo_vision_worker_core/pipeline/ModelManager.py,sha256=2DoQiIdF-PAqU7nT_u6bj-DY0aT2FHb8kt24okGGCRc,7449
 nedo_vision_worker_core/pipeline/PipelineConfigManager.py,sha256=X55i9GyXcW9ylO6cj2UMAZFSxxPViacL4H4DZl60CAY,1157
-nedo_vision_worker_core/pipeline/PipelineManager.py,sha256=3I9UBJu_rRfTEctwj8i4hO4MHjpBtYpfh-rIi64qgEw,7638
+nedo_vision_worker_core/pipeline/PipelineManager.py,sha256=AlDwBYYRPeAeh2ilmC8n-A_2gYPqAzeSSVpR1Tc0ipE,8366
 nedo_vision_worker_core/pipeline/PipelinePrepocessor.py,sha256=cCiVSHHqsKCtKYURdYoEjHJX2GnT6zd8kQ6ZukjQ3V0,1271
-nedo_vision_worker_core/pipeline/PipelineProcessor.py,sha256=FYpZw2vRRuweJ798gRhygOfF7cCJbKRApwZ52kxSEEM,33478
-nedo_vision_worker_core/pipeline/PipelineSyncThread.py,sha256=HkW6wj0eDr6M1K3Y25IlB2V6tpIZsKA34AM49AXvcQk,8707
+nedo_vision_worker_core/pipeline/PipelineProcessor.py,sha256=XITn3d8la37sCYEI_ZQzBoYYK6nrY9hAg6CaLKbxBF0,35305
+nedo_vision_worker_core/pipeline/PipelineSyncThread.py,sha256=UmjME7b1UYABVXKzYvlYBhVIyDcrsL5JDMxNt9G-0yk,9504
 nedo_vision_worker_core/pipeline/__init__.py,sha256=Nqnn8clbgv-5l0PgxcTOldg8mkMKrFn4TvPL-rYUUGg,1
 nedo_vision_worker_core/preprocessing/ImageResizer.py,sha256=RvOazxe6dJQuiy0ZH4lIGbdFfiu0FLUVCHoMvxkDNT4,1324
 nedo_vision_worker_core/preprocessing/ImageRoi.py,sha256=iO7oQ-SdUSA_kTIVBuq_mdycXsiJNfiFD3J7-VTxiQ4,2141
@@ -75,12 +75,12 @@ nedo_vision_worker_core/repositories/RestrictedAreaRepository.py,sha256=a5Vc8WLT
 nedo_vision_worker_core/repositories/WorkerSourcePipelineDebugRepository.py,sha256=lN_yip6woya9YUA5sYKbTyDQz2qSfgqkr3YP2hSd9ws,3211
 nedo_vision_worker_core/repositories/WorkerSourcePipelineDetectionRepository.py,sha256=5m4lvmIETJSGDH1T1EHuUDWC-13t5I860UbN_uzEj9A,2641
 nedo_vision_worker_core/repositories/WorkerSourcePipelineRepository.py,sha256=vwwRA1INuK66siOHNZxSBX8CE9uEW8VVcCIA7dmshKo,4714
-nedo_vision_worker_core/repositories/WorkerSourceRepository.py,sha256=-a-UlsopPlJWlY36QUodPEjSZVE3BDoLgsVAioiNOo0,663
+nedo_vision_worker_core/repositories/WorkerSourceRepository.py,sha256=YFevfYhAsYd7Eho1iagzjk67tKAQfqmoIExyxvR2Bzc,1760
 nedo_vision_worker_core/repositories/__init__.py,sha256=Nqnn8clbgv-5l0PgxcTOldg8mkMKrFn4TvPL-rYUUGg,1
 nedo_vision_worker_core/services/SharedVideoStreamServer.py,sha256=rhCineMKPG3GQbrMHlSHP4xhXaGZ6Rn1oqIajW5xpaY,9827
 nedo_vision_worker_core/services/VideoSharingDaemon.py,sha256=iY6afEKTOsphfHvmZTL0grezka2DS9DDq-1EIpVMy0Y,28524
 nedo_vision_worker_core/services/VideoSharingDaemonManager.py,sha256=sc8VZo5iwoOdR8uTiel5BKz6-eZ7wwLy3IwV_3tsAu0,10340
-nedo_vision_worker_core/streams/RTMPStreamer.py,sha256=0eThAwEyOg1ZuO0zbIvdqOICGQDRoOPxEcOBQtskM7A,17110
+nedo_vision_worker_core/streams/RTMPStreamer.py,sha256=7vg2as_TtmZavLnzhEUOv6HhjoGH5X3JO9HUI74meNs,18789
 nedo_vision_worker_core/streams/SharedVideoDeviceManager.py,sha256=vSslwxbhKH6FPndR1HcSFIVWtF-iiOQMlSa4VvFa6M4,16265
 nedo_vision_worker_core/streams/StreamSyncThread.py,sha256=ETT0N_P90ksn6Q5pb7NvMadqCuoicz_g52lcDkHIp88,5382
 nedo_vision_worker_core/streams/VideoStream.py,sha256=nGtJ4FAZ1Ek-8hVRopEt0bLWLpa10OtyUwdDEuXLObQ,13343
@@ -98,8 +98,8 @@ nedo_vision_worker_core/util/PipelinePreviewChecker.py,sha256=XxlSMlrDlRrzfV8_Y-
 nedo_vision_worker_core/util/PlatformDetector.py,sha256=GGL8UfeMQITR22EMYIRWnuOEnSqo7Dr5mb0PaFrl8AM,3006
 nedo_vision_worker_core/util/TablePrinter.py,sha256=wzLGgb1GFMeIbAP6HmKcZD33j4D-IlyqlyeR7C5yD7w,1137
 nedo_vision_worker_core/util/__init__.py,sha256=Nqnn8clbgv-5l0PgxcTOldg8mkMKrFn4TvPL-rYUUGg,1
-nedo_vision_worker_core-0.4.0.dist-info/METADATA,sha256=xzaULdQDUdB5jKhmotw2EL4Ua4LJlQ0kExewX2JfUn8,14426
-nedo_vision_worker_core-0.4.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-nedo_vision_worker_core-0.4.0.dist-info/entry_points.txt,sha256=pIPafsvPnBw-fpBKBmc1NQCQ6PQY3ad8mZ6mn8_p5FI,70
-nedo_vision_worker_core-0.4.0.dist-info/top_level.txt,sha256=y8kusXjVYqtG8MSHYWTrk8bRrvjOrphKXYyzu943TTQ,24
-nedo_vision_worker_core-0.4.0.dist-info/RECORD,,
+nedo_vision_worker_core-0.4.3.dist-info/METADATA,sha256=niAjMl89ByK-mEOVchjKAZ2s52xyctOOIS-lLK7_QkU,14426
+nedo_vision_worker_core-0.4.3.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+nedo_vision_worker_core-0.4.3.dist-info/entry_points.txt,sha256=pIPafsvPnBw-fpBKBmc1NQCQ6PQY3ad8mZ6mn8_p5FI,70
+nedo_vision_worker_core-0.4.3.dist-info/top_level.txt,sha256=y8kusXjVYqtG8MSHYWTrk8bRrvjOrphKXYyzu943TTQ,24
+nedo_vision_worker_core-0.4.3.dist-info/RECORD,,