nedo-vision-worker-core 0.4.0-py3-none-any.whl → 0.4.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of nedo-vision-worker-core might be problematic.

@@ -7,10 +7,10 @@ A library for running AI vision processing and detection in the Nedo Vision plat
  from .core_service import CoreService
  from .callbacks import DetectionType, CallbackTrigger, DetectionData, IntervalMetadata
 
- __version__ = "0.4.0"
+ __version__ = "0.4.1"
  __all__ = [
  "CoreService",
- "DetectionType",
+ "DetectionType",
  "CallbackTrigger",
  "DetectionData",
  "IntervalMetadata",
@@ -259,9 +259,10 @@ class PipelineProcessor:
  finally:
  self.rtmp_streamer = None
 
- # Simplified queue feeding - avoid expensive try-except
- if not self.frame_queue.full():
- self.frame_queue.put_nowait(frame)
+ # Only feed frames to detection queue if detection processor is active
+ if self.detection_processor is not None:
+ if not self.frame_queue.full():
+ self.frame_queue.put_nowait(frame)
 
  loop_elapsed = time.time() - loop_start
  sleep_time = max(0.001, target_frame_time - loop_elapsed)
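The producer-side change above only enqueues frames when a detection consumer is attached. A minimal sketch of that guarded, non-blocking feed (the helper name feed_detection_queue is hypothetical; frame_queue and detection_processor mirror the attributes in the diff):

    import queue

    def feed_detection_queue(frame, frame_queue: queue.Queue, detection_processor) -> bool:
        # Skip the queue entirely when no detection consumer exists.
        if detection_processor is None:
            return False
        # Drop the frame rather than block the capture loop when the queue is full.
        if frame_queue.full():
            return False
        frame_queue.put_nowait(frame)
        return True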
@@ -307,7 +308,13 @@ class PipelineProcessor:
 
  while self.running:
  try:
- frame = self.frame_queue.get(block=True, timeout=1)
+ # Calculate how long to wait for next detection
+ current_time = time.time()
+ time_since_last_detection = current_time - last_detection_time
+ time_until_next_detection = max(0.1, self.detection_interval - time_since_last_detection)
+
+ # Wait for frame with timeout aligned to detection interval
+ frame = self.frame_queue.get(block=True, timeout=time_until_next_detection)
 
  # Check for poison pill (None = stop signal)
  if frame is None:
@@ -332,9 +339,14 @@ class PipelineProcessor:
  except queue.Empty:
  pass
 
- # Respect detection interval
+ # Respect detection interval - skip if too soon
  if (current_time - last_detection_time) < self.detection_interval:
+ # Sleep the remaining time instead of busy-waiting
+ remaining_time = self.detection_interval - (current_time - last_detection_time)
+ if remaining_time > 0.01:
+ time.sleep(remaining_time)
  continue
+
  last_detection_time = current_time
 
  if self.detection_processor is None or frame is None or frame.size == 0:
@@ -363,6 +375,7 @@ class PipelineProcessor:
  )
 
  except queue.Empty:
+ # Timeout occurred - this is normal, just continue to next iteration
  pass
  except Exception as e:
  logging.error(f"❌ Error in detection thread for pipeline {pipeline_id}: {e}", exc_info=True)
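Taken together, the detection-thread changes above replace the fixed 1-second queue timeout with one aligned to the detection interval and sleep out any remainder instead of spinning. A simplified, self-contained sketch of that pacing (detection_loop, process, and the bare arguments are illustrative; the real logic lives on PipelineProcessor):

    import queue
    import time

    def detection_loop(frame_queue: queue.Queue, detection_interval: float, process) -> None:
        last_detection_time = 0.0
        while True:
            # Wait no longer than the time left until the next detection is due.
            now = time.time()
            wait = max(0.1, detection_interval - (now - last_detection_time))
            try:
                frame = frame_queue.get(block=True, timeout=wait)
            except queue.Empty:
                continue  # timing out between detections is expected
            if frame is None:
                break  # poison pill: stop signal
            now = time.time()
            if (now - last_detection_time) < detection_interval:
                # Sleep the remaining time instead of busy-waiting, then skip this frame.
                remaining = detection_interval - (now - last_detection_time)
                if remaining > 0.01:
                    time.sleep(remaining)
                continue
            last_detection_time = now
            process(frame)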
@@ -307,6 +307,7 @@ class RTMPStreamer:
  if not self.is_active():
  raise BrokenPipeError("FFmpeg process is not active")
  self._ffmpeg_process.stdin.write(frame.tobytes())
+ self._ffmpeg_process.stdin.flush() # Ensure data is sent immediately
  except (BrokenPipeError, OSError) as e:
 
  # Check if this failure was from a HW encoder
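The added flush ensures each raw frame leaves Python's buffered file object as soon as it is written. A minimal sketch of the write path (write_frame and proc are illustrative; frame is assumed to be a numpy array already matching the rawvideo geometry ffmpeg expects):

    import subprocess

    def write_frame(proc: subprocess.Popen, frame) -> None:
        if proc.poll() is not None:
            raise BrokenPipeError("FFmpeg process is not active")
        proc.stdin.write(frame.tobytes())
        # Flush so frames are not held back in Python's internal buffer,
        # which can otherwise add latency or stall the RTMP stream.
        proc.stdin.flush()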
@@ -356,23 +357,20 @@ class RTMPStreamer:
  '-framerate', str(self.fps), '-i', '-',
  ]
 
+ # Add encoder and encoder-specific parameters
  cmd.extend(encoder_args)
 
+ # Common video parameters
  cmd.extend([
- '-profile:v', 'main', '-pix_fmt', 'yuv420p',
+ '-pix_fmt', 'yuv420p',
  '-b:v', f"{self.bitrate}k", '-maxrate', f"{self.bitrate}k", '-bufsize', f"{self.bitrate*2}k",
  '-g', str(self.fps * 2), '-keyint_min', str(self.fps),
- '-force_key_frames', 'expr:gte(t,n_forced*1)', '-an',
- '-flvflags', 'no_duration_filesize', '-f', 'flv', self.rtmp_url,
+ '-force_key_frames', 'expr:gte(t,n_forced*1)',
+ '-an', # No audio
+ '-flvflags', 'no_duration_filesize',
+ '-f', 'flv', self.rtmp_url,
  ])
 
- if encoder_name == "libx264":
- cmd.extend([
- "-preset", "ultrafast",
- "-tune", "zerolatency",
- "-x264-params", "open_gop=0:aud=1:repeat-headers=1:nal-hrd=cbr",
- ])
-
  return cmd, encoder_name
 
  def _select_ffmpeg_encoder(self, force_cpu: bool = False) -> Tuple[List[str], str]:
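For orientation, the command is now assembled as input options, then encoder-specific flags from _select_ffmpeg_encoder(), then the common output options shown above. A rough reconstruction for a hypothetical 1280x720, 30 fps, 2500 kbps CPU-encoded stream (the leading rawvideo input flags and the example values are assumptions, not taken from the diff):

    fps, bitrate = 30, 2500
    rtmp_url = "rtmp://example.invalid/live/stream"  # placeholder URL
    encoder_args = ["-c:v", "libx264", "-preset", "ultrafast",
                    "-tune", "zerolatency", "-profile:v", "main"]

    cmd = [
        "ffmpeg",
        "-f", "rawvideo", "-pix_fmt", "bgr24", "-s", "1280x720",  # assumed input options
        "-framerate", str(fps), "-i", "-",
    ]
    cmd.extend(encoder_args)  # encoder-specific parameters
    cmd.extend([
        "-pix_fmt", "yuv420p",
        "-b:v", f"{bitrate}k", "-maxrate", f"{bitrate}k", "-bufsize", f"{bitrate * 2}k",
        "-g", str(fps * 2), "-keyint_min", str(fps),
        "-force_key_frames", "expr:gte(t,n_forced*1)",
        "-an",
        "-flvflags", "no_duration_filesize",
        "-f", "flv", rtmp_url,
    ])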
@@ -381,26 +379,74 @@
  Will force CPU if force_cpu is True.
  """
  if force_cpu:
- return ["-c:v", "libx264"], "libx264"
+ return [
+ "-c:v", "libx264",
+ "-preset", "ultrafast",
+ "-tune", "zerolatency",
+ "-profile:v", "main",
+ ], "libx264"
 
  force_encoder = os.environ.get("RTMP_ENCODER", "").lower()
 
  if force_encoder == "cpu" or force_encoder == "libx264":
- return ["-c:v", "libx264"], "libx264"
+ return [
+ "-c:v", "libx264",
+ "-preset", "ultrafast",
+ "-tune", "zerolatency",
+ "-profile:v", "main",
+ ], "libx264"
  elif force_encoder == "nvenc":
- return ["-c:v", "h264_nvenc", "-preset", "llhp"], "h264_nvenc"
+ return [
+ "-c:v", "h264_nvenc",
+ "-preset", "p1", # p1 = fastest, p7 = slowest
+ "-tune", "ull", # ultra-low latency
+ "-rc:v", "cbr", # constant bitrate for streaming
+ "-rc-lookahead", "0", # disable lookahead for lower latency
+ "-delay", "0", # zero delay
+ "-zerolatency", "1", # enable zero latency mode
+ "-profile:v", "main",
+ "-gpu", "0", # Use first GPU
+ ], "h264_nvenc"
 
  if self._platform.is_jetson():
- # Jetson-specific encoder
- return ["-c:v", "h264_omx"], "h264_omx"
+ # Jetson-specific encoder with optimizations
+ return [
+ "-c:v", "h264_nvenc",
+ "-preset", "p1",
+ "-tune", "ull",
+ "-rc:v", "cbr",
+ "-rc-lookahead", "0",
+ "-delay", "0",
+ "-zerolatency", "1",
+ "-profile:v", "main",
+ ], "h264_nvenc"
 
  if sys.platform == "darwin":
- return ["-c:v", "h264_videotoolbox"], "h264_videotoolbox"
+ return [
+ "-c:v", "h264_videotoolbox",
+ "-profile:v", "main",
+ "-realtime", "1",
+ ], "h264_videotoolbox"
 
  has_nvidia = (os.environ.get("NVIDIA_VISIBLE_DEVICES") is not None or
  os.path.exists("/proc/driver/nvidia/version"))
 
  if has_nvidia:
- return ["-c:v", "h264_nvenc", "-preset", "llhp"], "h264_nvenc"
+ return [
+ "-c:v", "h264_nvenc",
+ "-preset", "p1", # p1 = fastest preset
+ "-tune", "ull", # ultra-low latency
+ "-rc:v", "cbr", # constant bitrate
+ "-rc-lookahead", "0", # disable lookahead
+ "-delay", "0", # zero delay
+ "-zerolatency", "1", # zero latency mode
+ "-profile:v", "main",
+ "-gpu", "0", # Use first GPU
+ ], "h264_nvenc"
 
- return ["-c:v", "libx264"], "libx264"
+ return [
+ "-c:v", "libx264",
+ "-preset", "ultrafast",
+ "-tune", "zerolatency",
+ "-profile:v", "main",
+ ], "libx264"
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: nedo-vision-worker-core
- Version: 0.4.0
+ Version: 0.4.1
  Summary: Nedo Vision Worker Core Library for AI Vision Processing
  Author-email: Willy Achmat Fauzi <willy.achmat@gmail.com>
  Maintainer-email: Willy Achmat Fauzi <willy.achmat@gmail.com>
@@ -1,4 +1,4 @@
- nedo_vision_worker_core/__init__.py,sha256=_NVuaujglbPX63LBohmNR6a1n7ctmMl97EsX9LBIVHA,1924
+ nedo_vision_worker_core/__init__.py,sha256=PMh9hXyCQ2JPaLxq5mFDqRZonzYLfZnwwtGukl0GVBM,1923
  nedo_vision_worker_core/cli.py,sha256=8YuKWsIgICUYXE_QtwyU3WzGhVjTWiAo5uzpFOmjNc8,5766
  nedo_vision_worker_core/core_service.py,sha256=q8-GuGW_l5l6wTWQDqc7BDdhM7zKC-mMLZ5wIHu9xV0,11628
  nedo_vision_worker_core/doctor.py,sha256=K_-hVV2-mdEefZ4Cfu5hMCiOxBiI1aXY8VtkkpK80Lc,10651
@@ -61,7 +61,7 @@ nedo_vision_worker_core/pipeline/ModelManager.py,sha256=2DoQiIdF-PAqU7nT_u6bj-DY
  nedo_vision_worker_core/pipeline/PipelineConfigManager.py,sha256=X55i9GyXcW9ylO6cj2UMAZFSxxPViacL4H4DZl60CAY,1157
  nedo_vision_worker_core/pipeline/PipelineManager.py,sha256=3I9UBJu_rRfTEctwj8i4hO4MHjpBtYpfh-rIi64qgEw,7638
  nedo_vision_worker_core/pipeline/PipelinePrepocessor.py,sha256=cCiVSHHqsKCtKYURdYoEjHJX2GnT6zd8kQ6ZukjQ3V0,1271
- nedo_vision_worker_core/pipeline/PipelineProcessor.py,sha256=FYpZw2vRRuweJ798gRhygOfF7cCJbKRApwZ52kxSEEM,33478
+ nedo_vision_worker_core/pipeline/PipelineProcessor.py,sha256=qjAviYziFX9zJbRDIx7me94ZEccA1r53bunuDySTPhQ,34356
  nedo_vision_worker_core/pipeline/PipelineSyncThread.py,sha256=HkW6wj0eDr6M1K3Y25IlB2V6tpIZsKA34AM49AXvcQk,8707
  nedo_vision_worker_core/pipeline/__init__.py,sha256=Nqnn8clbgv-5l0PgxcTOldg8mkMKrFn4TvPL-rYUUGg,1
  nedo_vision_worker_core/preprocessing/ImageResizer.py,sha256=RvOazxe6dJQuiy0ZH4lIGbdFfiu0FLUVCHoMvxkDNT4,1324
@@ -80,7 +80,7 @@ nedo_vision_worker_core/repositories/__init__.py,sha256=Nqnn8clbgv-5l0PgxcTOldg8
  nedo_vision_worker_core/services/SharedVideoStreamServer.py,sha256=rhCineMKPG3GQbrMHlSHP4xhXaGZ6Rn1oqIajW5xpaY,9827
  nedo_vision_worker_core/services/VideoSharingDaemon.py,sha256=iY6afEKTOsphfHvmZTL0grezka2DS9DDq-1EIpVMy0Y,28524
  nedo_vision_worker_core/services/VideoSharingDaemonManager.py,sha256=sc8VZo5iwoOdR8uTiel5BKz6-eZ7wwLy3IwV_3tsAu0,10340
- nedo_vision_worker_core/streams/RTMPStreamer.py,sha256=0eThAwEyOg1ZuO0zbIvdqOICGQDRoOPxEcOBQtskM7A,17110
+ nedo_vision_worker_core/streams/RTMPStreamer.py,sha256=X6QL84VdjKd995BwpcvD99sCBJGxj4MXWI0q9zo5Izw,18761
  nedo_vision_worker_core/streams/SharedVideoDeviceManager.py,sha256=vSslwxbhKH6FPndR1HcSFIVWtF-iiOQMlSa4VvFa6M4,16265
  nedo_vision_worker_core/streams/StreamSyncThread.py,sha256=ETT0N_P90ksn6Q5pb7NvMadqCuoicz_g52lcDkHIp88,5382
  nedo_vision_worker_core/streams/VideoStream.py,sha256=nGtJ4FAZ1Ek-8hVRopEt0bLWLpa10OtyUwdDEuXLObQ,13343
@@ -98,8 +98,8 @@ nedo_vision_worker_core/util/PipelinePreviewChecker.py,sha256=XxlSMlrDlRrzfV8_Y-
  nedo_vision_worker_core/util/PlatformDetector.py,sha256=GGL8UfeMQITR22EMYIRWnuOEnSqo7Dr5mb0PaFrl8AM,3006
  nedo_vision_worker_core/util/TablePrinter.py,sha256=wzLGgb1GFMeIbAP6HmKcZD33j4D-IlyqlyeR7C5yD7w,1137
  nedo_vision_worker_core/util/__init__.py,sha256=Nqnn8clbgv-5l0PgxcTOldg8mkMKrFn4TvPL-rYUUGg,1
- nedo_vision_worker_core-0.4.0.dist-info/METADATA,sha256=xzaULdQDUdB5jKhmotw2EL4Ua4LJlQ0kExewX2JfUn8,14426
- nedo_vision_worker_core-0.4.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- nedo_vision_worker_core-0.4.0.dist-info/entry_points.txt,sha256=pIPafsvPnBw-fpBKBmc1NQCQ6PQY3ad8mZ6mn8_p5FI,70
- nedo_vision_worker_core-0.4.0.dist-info/top_level.txt,sha256=y8kusXjVYqtG8MSHYWTrk8bRrvjOrphKXYyzu943TTQ,24
- nedo_vision_worker_core-0.4.0.dist-info/RECORD,,
+ nedo_vision_worker_core-0.4.1.dist-info/METADATA,sha256=eVzhZ0Gwb-Rd1lIasDKRUrGAfrDzqPgiTfR54rg6Vl8,14426
+ nedo_vision_worker_core-0.4.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ nedo_vision_worker_core-0.4.1.dist-info/entry_points.txt,sha256=pIPafsvPnBw-fpBKBmc1NQCQ6PQY3ad8mZ6mn8_p5FI,70
+ nedo_vision_worker_core-0.4.1.dist-info/top_level.txt,sha256=y8kusXjVYqtG8MSHYWTrk8bRrvjOrphKXYyzu943TTQ,24
+ nedo_vision_worker_core-0.4.1.dist-info/RECORD,,