nedo-vision-worker-core 0.3.0__py3-none-any.whl → 0.3.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



@@ -7,7 +7,7 @@ A library for running AI vision processing and detection in the Nedo Vision plat
  from .core_service import CoreService
  from .callbacks import DetectionType, CallbackTrigger, DetectionData, IntervalMetadata
 
- __version__ = "0.3.0"
+ __version__ = "0.3.2"
  __all__ = [
      "CoreService",
      "DetectionType",
@@ -91,61 +91,15 @@ Detection Callbacks:
 
      run_parser.add_argument(
          "--rtmp-server",
-         default="rtmp://localhost:1935/live",
-         help="RTMP server URL for video streaming (default: rtmp://localhost:1935/live)"
+         default="rtmp://live.vision.sindika.co.id:1935/live",
+         help="RTMP server URL for video streaming (default: rtmp://live.vision.sindika.co.id:1935/live)"
      )
 
      run_parser.add_argument(
-         "--enable-video-sharing-daemon",
-         action="store_true",
-         default=True,
-         help="Enable automatic video sharing daemon management (default: True)"
-     )
-
-     run_parser.add_argument(
-         "--disable-video-sharing-daemon",
+         "--disable_video_sharing_daemon",
          action="store_true",
          default=False,
-         help="Disable automatic video sharing daemon management"
-     )
-
-     # Add legacy arguments for backward compatibility (when no subcommand is used)
-     parser.add_argument(
-         "--drawing-assets",
-         help="(Legacy) Path to drawing assets directory (optional, uses bundled assets by default)"
-     )
-
-     parser.add_argument(
-         "--log-level",
-         choices=["DEBUG", "INFO", "WARNING", "ERROR"],
-         default="INFO",
-         help="(Legacy) Logging level (default: INFO)"
-     )
-
-     parser.add_argument(
-         "--storage-path",
-         default="data",
-         help="(Legacy) Storage path for databases and files (default: data)"
-     )
-
-     parser.add_argument(
-         "--rtmp-server",
-         default="rtmp://localhost:1935/live",
-         help="(Legacy) RTMP server URL for video streaming (default: rtmp://localhost:1935/live)"
-     )
-
-     parser.add_argument(
-         "--enable-video-sharing-daemon",
-         action="store_true",
-         default=True,
-         help="(Legacy) Enable automatic video sharing daemon management (default: True)"
-     )
-
-     parser.add_argument(
-         "--disable-video-sharing-daemon",
-         action="store_true",
-         default=False,
-         help="(Legacy) Disable automatic video sharing daemon management"
+         help="Disable automatic video sharing daemon management (default: False)"
      )
 
      parser.add_argument(
@@ -161,11 +115,7 @@ Detection Callbacks:
          run_core_service(args)
      elif args.command == 'doctor':
          run_doctor()
-     elif hasattr(args, 'drawing_assets') and args.drawing_assets is not None:  # Legacy mode - if any arguments are provided without subcommand
-         print("⚠️ Warning: Using legacy command format. Consider using 'nedo-core run --drawing-assets ...' instead.")
-         run_core_service(args)
      else:
-         # If no subcommand provided, show help
          parser.print_help()
          sys.exit(1)
 
@@ -197,8 +147,6 @@ def run_core_service(args):
      enable_daemon = True  # Default
      if hasattr(args, 'disable_video_sharing_daemon') and args.disable_video_sharing_daemon:
          enable_daemon = False
-     elif hasattr(args, 'enable_video_sharing_daemon'):
-         enable_daemon = args.enable_video_sharing_daemon
 
      # Create and start the core service
      service = CoreService(
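Note on the CLI hunks above: 0.3.2 drops the paired enable/disable options and all legacy no-subcommand arguments, leaving a single opt-out flag that is now spelled with underscores (--disable_video_sharing_daemon), and the default --rtmp-server changes from localhost to rtmp://live.vision.sindika.co.id:1935/live. A minimal argparse sketch of the new semantics, not the package's actual CLI module:

    import argparse

    # Sharing is on unless explicitly disabled; the underscore spelling maps
    # directly to args.disable_video_sharing_daemon.
    parser = argparse.ArgumentParser(prog="nedo-core")
    subparsers = parser.add_subparsers(dest="command")
    run_parser = subparsers.add_parser("run")
    run_parser.add_argument(
        "--rtmp-server",
        default="rtmp://live.vision.sindika.co.id:1935/live",
    )
    run_parser.add_argument(
        "--disable_video_sharing_daemon",
        action="store_true",
        default=False,
    )

    args = parser.parse_args(["run", "--disable_video_sharing_daemon"])
    enable_daemon = not args.disable_video_sharing_daemon  # mirrors run_core_service()
    print(enable_daemon)  # False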
@@ -19,7 +19,7 @@ def check_python_version():
      print("🐍 Checking Python version...")
 
      version = sys.version_info
-     min_version = (3, 8)
+     min_version = (3, 10)
 
      if version >= min_version:
          print(f" ✅ Python {version.major}.{version.minor}.{version.micro} (meets requirement >= {min_version[0]}.{min_version[1]})")
@@ -20,13 +20,6 @@ class PipelineProcessor:
      """Handles pipeline processing including preprocessing, AI model inference, tracking, and video stream processing."""
 
      def __init__(self, pipeline_id, worker_source_id, model, enable_visualization=True):
-         """
-         Initializes the PipelineProcessor with configurable detection labels.
-
-         :param model: The model to use for inference.
-         :param enable_visualization: Flag to enable visualization.
-         :param detection_labels: List of object labels to detect.
-         """
          self.running = True
          self.video_debugger = VideoDebugger(enable_visualization)
          self.tracker_manager = TrackerManager()
@@ -36,7 +29,9 @@ class PipelineProcessor:
          self.detection_processor = None
          self.threshold = 0.7
 
+         # Keep the latest frame for detection; size=1 and we overwrite when full
          self.frame_queue = queue.Queue(maxsize=1)
+
          self.tracked_objects_render = []
          self.detection_thread = None
          self.frame_counter = 0
@@ -54,27 +49,28 @@ class PipelineProcessor:
          self.debug_flag = False
          self.debug_repo = WorkerSourcePipelineDebugRepository()
          self.detection_repo = WorkerSourcePipelineDetectionRepository()
+
+         # Frame recovery mechanism
+         self.consecutive_frame_failures = 0
+         self.max_consecutive_failures = 150  # 1.5 seconds at 0.01s intervals
+         self.last_successful_frame_time = time.time()
+         self.stream_recovery_timeout = 30.0  # 30 seconds timeout for stream recovery
+
+         # HEVC error tracking
+         self.hevc_error_count = 0
+         self.last_hevc_recovery = 0
+         self.hevc_recovery_cooldown = 30.0  # 30 seconds between HEVC recovery attempts
 
      def load_model(self, model):
-         """
-         Load a new AI model into the detection manager.
-         This allows runtime model updates without restarting the pipeline.
-
-         :param model: The new AI model to load
-         """
-         logging.info(f"🔄 Loading new model for pipeline {self.pipeline_id}: {model.name if model else 'None'}")
+         logging.info(f"🔄 Loading new model for pipeline {self.pipeline_id}: {getattr(model, 'name', None) or 'Unknown'}")
          self.detection_manager.load_model(model)
-
-         # Re-initialize detection processor to use the new model configuration
          self._update_detection_processor()
-
          logging.info(f"✅ Model updated for pipeline {self.pipeline_id}")
 
      def _get_detection_processor_code(self):
          for code in self.detection_processor_codes:
              if self.config_manager.is_feature_enabled(code):
                  return code
-
          return None
 
      def _get_detection_processor(self, code):
@@ -87,7 +83,6 @@ class PipelineProcessor:
 
      def _update_detection_processor(self):
          code = self._get_detection_processor_code()
-
          if self.detection_processor and self.detection_processor.code == code:
              return
 
@@ -107,15 +102,22 @@ class PipelineProcessor:
                  exclusive_attribute_groups=self.detection_processor.exclusive_labels,
                  multi_instance_classes=multi_instance_classes
              )
-
+         else:
+             # Reset drawer/tracker when no processor enabled
+             self.frame_drawer.update_config()
+             self.tracker_manager.update_config([], [], [])
+
      def _update_config(self):
          self.config_manager.update(self.pipeline_id)
          self.preprocessor.update(self.config_manager)
          self.detection_interval = self._get_detection_interval()
          self._update_detection_processor()
+
+         # Reset failure counters on config update
+         self.consecutive_frame_failures = 0
+         self.last_successful_frame_time = time.time()
 
          ai_model = self.detection_manager.model_metadata
-
          if self.detection_processor:
              config = self.config_manager.get_feature_config(self.detection_processor.code)
              self.detection_processor.update(self.config_manager, ai_model)
@@ -125,37 +127,30 @@ class PipelineProcessor:
              self.frame_drawer.polygons = [((0, 0, 255), p) for p in self.detection_processor.restricted_areas]
          else:
              self.threshold = 0.7
-             self.frame_drawer.update_config()
-             self.tracker_manager.update_config(
-                 attribute_labels=[],
-                 exclusive_attribute_groups=[],
-                 multi_instance_classes=[]
-             )
 
      def process_pipeline(self, video_manager: VideoStreamManager):
-         """
-         Runs the full pipeline processing including preprocessing, detection and tracking.
-         """
          pipeline_id = self.pipeline_id
          worker_source_id = self.worker_source_id
-
          logging.info(f"🎯 Running pipeline processing for pipeline {pipeline_id} | Source: {worker_source_id}")
 
          self._update_config()
+         self.consecutive_frame_failures = 0
+         self.last_successful_frame_time = time.time()
 
          initial_frame = self._wait_for_frame(video_manager)
          if initial_frame is None:
              logging.error(f"❌ Pipeline {pipeline_id} | Source {worker_source_id}: No initial frame available. Exiting...")
              return
 
+         # Start RTMP (internal pacer thread will begin on first push_frame())
          self.rtmp_streamer = RTMPStreamer(pipeline_id)
 
-         # Start detection in a separate thread
+         # Start detection thread
          self.detection_thread = threading.Thread(
              target=self._detection_worker,
-             name=f"detection-{pipeline_id}"
+             name=f"detection-{pipeline_id}",
+             daemon=True
          )
-         self.detection_thread.daemon = True
          self.detection_thread.start()
 
          try:
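Side note on the thread change above: passing daemon=True to the constructor and assigning the .daemon attribute before start() are equivalent ways to mark the detection thread as a daemon so it cannot block interpreter shutdown. For reference only:

    import threading

    def worker():
        pass

    # 0.3.0 style: set the attribute before start()
    t1 = threading.Thread(target=worker, name="detection-1")
    t1.daemon = True

    # 0.3.2 style: pass daemon= to the constructor
    t2 = threading.Thread(target=worker, name="detection-2", daemon=True)

    t1.start(); t2.start()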
@@ -163,64 +158,88 @@ class PipelineProcessor:
                  frame = video_manager.get_frame(worker_source_id)
 
                  if frame is None:
-                     logging.warning(f"⚠️ No frame available for {worker_source_id}. Retrying...")
-                     # Check if stream was removed
-                     if not video_manager.has_stream(worker_source_id):
-                         logging.info(f"🛑 Stream {worker_source_id} was removed, stopping pipeline")
+                     if not self._handle_frame_failure(video_manager, worker_source_id):
                          break
-                     time.sleep(0.01)
+                     # no frame this tick—just continue (the streamer will repeat last good frame)
                      continue
-
+
+                 # cv2.imshow("AA", frame)
+                 # cv2.waitKey(1)
+                 # continue
+
+                 # successful frame
+                 self.consecutive_frame_failures = 0
+                 self.last_successful_frame_time = time.time()
                  self.frame_counter += 1
 
-                 self.frame_drawer.draw_polygons(frame)
-                 drawn_frame = self.frame_drawer.draw_frame(
-                     frame.copy(),
-                     self.tracked_objects_render,
-                     with_trails=True,
-                     trail_length=int(2 / self.detection_interval)
-                 )
+                 # draw annotations
+                 try:
+                     self.frame_drawer.draw_polygons(frame)
+                     drawn_frame = self.frame_drawer.draw_frame(
+                         frame.copy(),
+                         self.tracked_objects_render,
+                         with_trails=True,
+                         trail_length=int(max(1, 2 / self.detection_interval))
+                     )
+                 except Exception as e:
+                     logging.error(f"❌ Draw failed, using raw frame: {e}")
+                     drawn_frame = frame
 
+                 # debug snapshot if requested
                  if self.debug_flag:
                      tracked_objects_render = self._process_frame(frame)
-
-                     self.debug_repo.update_debug_entries_by_pipeline_id(
-                         self.pipeline_id,
-                         self.frame_drawer.draw_frame(
-                             frame.copy(),
+                     try:
+                         self.debug_repo.update_debug_entries_by_pipeline_id(
+                             self.pipeline_id,
+                             self.frame_drawer.draw_frame(frame.copy(), tracked_objects_render),
                              tracked_objects_render
-                         ),
-                         tracked_objects_render
-                     )
+                         )
+                     except Exception as e:
+                         logging.warning(f"Debug save failed: {e}")
                      self.debug_flag = False
 
-                 # Check RTMP streamer status before sending frame
-                 if self.rtmp_streamer:
+                 # ---- RTMP push (latest-only; pacing handled inside RTMPStreamer) ----
+                 if self.rtmp_streamer is None or not self.rtmp_streamer.is_active():
+                     # (re)create; it will learn WxH on first push
+                     self.rtmp_streamer = RTMPStreamer(self.pipeline_id)
+
+                 try:
+                     self.rtmp_streamer.push_frame(drawn_frame)
+                 except Exception as e:
+                     logging.error(f"❌ RTMP push error: {e}")
                      try:
-                         self.rtmp_streamer.send_frame(drawn_frame)
-                     except Exception as e:
-                         logging.error(f"❌ RTMP streaming error: {e}")
-                         # Stop RTMP streamer on error
                          self.rtmp_streamer.stop_stream()
-                         self.rtmp_streamer = None
+                     except Exception:
+                         pass
+                     self.rtmp_streamer = None
 
-                 # Only put frame in queue if detection thread is still running
+                 # feed detection worker with latest-only behavior
                  if self.detection_thread and self.detection_thread.is_alive():
-                     if not self.frame_queue.full():
-                         self.frame_queue.put(frame, block=False)
-
+                     try:
+                         self.frame_queue.put_nowait(frame)
+                     except queue.Full:
+                         try:
+                             _ = self.frame_queue.get_nowait()
+                         except queue.Empty:
+                             pass
+                         try:
+                             self.frame_queue.put_nowait(frame)
+                         except queue.Full:
+                             pass
+
+                 # visualize
                  try:
                      self.video_debugger.show_frame(pipeline_id, worker_source_id, drawn_frame)
                  except Exception as e:
                      logging.error(f"⚠️ Failed to render frame for pipeline {pipeline_id}: {e}")
+
+                 time.sleep(0.1)
 
-                 time.sleep(0.01)
          except Exception as e:
              logging.error(f"❌ Error in pipeline {pipeline_id}: {e}", exc_info=True)
 
      def _process_frame(self, frame):
          dimension = frame.shape[:2]
-
          processed_frame = self.preprocessor.apply(frame)
 
          class_thresholds = {}
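The render loop above now hands frames to the detection thread with a "latest frame wins" policy: the queue holds at most one frame, and when it is full the stale frame is dropped before the new one is offered. A standalone sketch of that handoff, independent of the package's classes:

    import queue

    frame_queue = queue.Queue(maxsize=1)

    def offer_latest(frame):
        """Put `frame` into the queue, discarding any stale frame already there."""
        try:
            frame_queue.put_nowait(frame)
        except queue.Full:
            try:
                frame_queue.get_nowait()   # drop the stale frame
            except queue.Empty:
                pass                       # consumer grabbed it in the meantime
            try:
                frame_queue.put_nowait(frame)
            except queue.Full:
                pass                       # another producer won the race; skip this frame

    offer_latest("frame-1")
    offer_latest("frame-2")          # overwrites frame-1
    print(frame_queue.get_nowait())  # frame-2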
@@ -234,7 +253,7 @@ class PipelineProcessor:
          if main_threshold and ai_model and ai_model.get_main_class():
              class_thresholds[ai_model.get_main_class()] = main_threshold
 
-         detections = self.detection_manager.detect_objects(processed_frame, self.threshold, class_thresholds)
+         detections = self.detection_manager.detect_objects(processed_frame, self.threshold, class_thresholds)
          detections = self.preprocessor.revert_detections_bboxes(detections, dimension)
 
          if self.detection_processor:
@@ -243,17 +262,13 @@ class PipelineProcessor:
          else:
              return self.tracker_manager.track_objects(detections)
 
-
      def _detection_worker(self):
-         """
-         Runs detection in a separate thread and updates configuration periodically.
-         Applies preprocessing based on pipeline configuration.
-         """
+         """Runs detection in a separate thread and updates configuration periodically."""
          pipeline_id = self.pipeline_id
          worker_source_id = self.worker_source_id
          last_detection_time = time.time()
-         last_config_update_time = time.time()
-         config_update_interval = 5  # Update configuration every 5 seconds
+         last_config_update_time = time.time()
+         config_update_interval = 5  # seconds
 
          while self.running:
              try:
@@ -263,16 +278,22 @@ class PipelineProcessor:
                  # Update config periodically
                  if (current_time - last_config_update_time) >= config_update_interval:
                      self._update_config()
-                     last_config_update_time = current_time
+                     last_config_update_time = current_time
                      logging.info(f"🔄 Updated pipeline config for {pipeline_id}")
 
-                 # Process detection only if enough time has passed since last detection
-                 # detection_interval is the time in seconds between consecutive detections
+                 # Keep only the latest frame if we fell behind
+                 try:
+                     while True:
+                         newer = self.frame_queue.get_nowait()
+                         frame = newer
+                 except queue.Empty:
+                     pass
+
+                 # Respect detection interval
                  if (current_time - last_detection_time) < self.detection_interval:
                      continue
+                 last_detection_time = current_time
 
-                 last_detection_time = current_time
-
                  if self.detection_processor is None or frame is None or frame.size == 0:
                      self.tracked_objects_render = []
                      continue
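On the consumer side, the detection worker now drains the queue down to the newest frame before checking the detection interval, so a slow model never works on stale frames. A rough, self-contained illustration of that drain-and-throttle pattern; the names are placeholders, not the package API:

    import queue
    import time

    def latest_frame(q, frame):
        """Replace `frame` with the newest queued frame, if any."""
        try:
            while True:
                frame = q.get_nowait()
        except queue.Empty:
            pass
        return frame

    q = queue.Queue(maxsize=1)
    detection_interval = 0.5   # seconds between detections
    last_detection_time = 0.0
    frame = None

    for _ in range(3):
        frame = latest_frame(q, frame)
        now = time.time()
        if now - last_detection_time < detection_interval:
            continue               # too soon: skip this tick, keep the frame for later
        last_detection_time = now
        # run_detection(frame) would go here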
@@ -304,33 +325,165 @@ class PipelineProcessor:
                  logging.error(f"❌ Error in detection thread for pipeline {pipeline_id}: {e}", exc_info=True)
 
      def _wait_for_frame(self, video_manager, max_retries=10, sleep_time=3):
-         """Waits until a frame is available from the video source."""
+         logging.info(f" Waiting for initial frame from {self.worker_source_id}...")
          for retry_count in range(max_retries):
              frame = video_manager.get_frame(self.worker_source_id)
              if frame is not None:
+                 logging.info(f"✅ Initial frame received from {self.worker_source_id}")
                  return frame
+
+             if not video_manager.has_stream(self.worker_source_id):
+                 logging.error(f"❌ Stream {self.worker_source_id} not found in video manager")
+                 return None
+
              logging.warning(f"⚠️ Waiting for video stream {self.worker_source_id} (Attempt {retry_count + 1}/{max_retries})...")
+             if retry_count >= 3:
+                 self._log_stream_diagnostics(video_manager, self.worker_source_id)
              time.sleep(sleep_time)
 
+         logging.error(f"❌ Failed to get initial frame from {self.worker_source_id} after {max_retries} attempts")
          return None
 
+     def _handle_frame_failure(self, video_manager, worker_source_id):
+         """Handle frame retrieval failures with progressive backoff and recovery attempts."""
+         self.consecutive_frame_failures += 1
+
+         if not video_manager.has_stream(worker_source_id):
+             logging.info(f"🛑 Stream {worker_source_id} was removed, stopping pipeline")
+             return False
+
+         time_since_last_frame = time.time() - self.last_successful_frame_time
+         if time_since_last_frame > self.stream_recovery_timeout:
+             logging.error(f"❌ Stream {worker_source_id} recovery timeout ({self.stream_recovery_timeout}s). Stopping pipeline.")
+             return False
+
+         if self.consecutive_frame_failures <= 10:
+             if self.consecutive_frame_failures % 5 == 1:
+                 logging.debug(f"⚠️ No frame for {worker_source_id} (attempt {self.consecutive_frame_failures})")
+             time.sleep(0.01)
+         elif self.consecutive_frame_failures <= 50:
+             if self.consecutive_frame_failures % 10 == 1:
+                 logging.warning(f"⚠️ No frame for {worker_source_id} (attempt {self.consecutive_frame_failures}). Stream may be reconnecting...")
+             time.sleep(0.05)
+         elif self.consecutive_frame_failures <= self.max_consecutive_failures:
+             if self.consecutive_frame_failures % 20 == 1:
+                 logging.warning(f"⚠️ Persistent frame issues for {worker_source_id} (attempt {self.consecutive_frame_failures}). Checking stream health...")
+                 self._log_stream_diagnostics(video_manager, worker_source_id)
+             if self.consecutive_frame_failures % 60 == 1:
+                 if self._should_attempt_hevc_recovery(video_manager, worker_source_id):
+                     logging.info("🔧 Attempting HEVC-specific recovery for persistent frame failures...")
+                     if self._handle_hevc_recovery(video_manager, worker_source_id):
+                         logging.info("✅ HEVC recovery successful, continuing pipeline...")
+                         return True
+             time.sleep(0.1)
+         else:
+             logging.error(f"❌ Too many consecutive frame failures for {worker_source_id} ({self.consecutive_frame_failures}). Stopping pipeline.")
+             self._log_stream_diagnostics(video_manager, worker_source_id)
+             return False
+
+         return True
+
+     def _log_stream_diagnostics(self, video_manager, worker_source_id):
+         try:
+             stream_url = video_manager.get_stream_url(worker_source_id)
+             is_file = video_manager.is_video_file(worker_source_id)
+             if hasattr(video_manager, 'streams') and worker_source_id in video_manager.streams:
+                 stream = video_manager.streams[worker_source_id]
+                 state = stream.get_state() if hasattr(stream, 'get_state') else "unknown"
+                 is_connected = stream.is_connected() if hasattr(stream, 'is_connected') else "unknown"
+
+                 logging.info(f"📊 Stream diagnostics for {worker_source_id}:")
+                 logging.info(f" URL: {stream_url}")
+                 logging.info(f" Type: {'Video file' if is_file else 'Live stream'}")
+                 logging.info(f" State: {state}")
+                 logging.info(f" Connected: {is_connected}")
+                 logging.info(f" Time since last frame: {time.time() - self.last_successful_frame_time:.1f}s")
+
+                 if hasattr(stream, 'get_codec_info'):
+                     codec_info = stream.get_codec_info()
+                     if codec_info:
+                         logging.info(f" Codec: {codec_info}")
+                         if 'hevc' in str(codec_info).lower() or 'h265' in str(codec_info).lower():
+                             logging.warning(" ⚠️ HEVC stream detected - potential QP/POC errors")
+
+                 if hasattr(stream, 'get_recent_errors'):
+                     recent_errors = stream.get_recent_errors()
+                     if recent_errors:
+                         hevc_errors = [err for err in recent_errors if 'cu_qp_delta' in str(err.get('error', '')) or 'Could not find ref with POC' in str(err.get('error', ''))]
+                         if hevc_errors:
+                             logging.warning(f" 🔥 Recent HEVC errors: {len(hevc_errors)}")
+                             self.hevc_error_count += len(hevc_errors)
+                             for i, err in enumerate(hevc_errors[-3:]):
+                                 logging.warning(f" 🔥 HEVC Error {i+1}: {err.get('error', '')[:100]}...")
+             else:
+                 logging.info(f"📊 Stream {worker_source_id} not found in registry; checking device directly...")
+         except Exception as e:
+             logging.error(f"Error getting stream diagnostics: {e}")
+
+     def _should_attempt_hevc_recovery(self, video_manager, worker_source_id) -> bool:
+         current_time = time.time()
+         if current_time - self.last_hevc_recovery < self.hevc_recovery_cooldown:
+             logging.debug(f"HEVC recovery on cooldown ({current_time - self.last_hevc_recovery:.1f}s elapsed)")
+             return False
+
+         if hasattr(video_manager, 'streams') and worker_source_id in video_manager.streams:
+             stream = video_manager.streams[worker_source_id]
+             if hasattr(stream, 'get_recent_errors'):
+                 recent_errors = stream.get_recent_errors(max_age_seconds=60)
+                 hevc_errors = [err for err in recent_errors if
+                                'cu_qp_delta' in str(err.get('error', '')) or
+                                'Could not find ref with POC' in str(err.get('error', ''))]
+                 if len(hevc_errors) >= 3:
+                     logging.info(f"HEVC recovery warranted: {len(hevc_errors)} HEVC errors in last minute")
+                     return True
+
+         if self.hevc_error_count >= 5:
+             logging.info(f"HEVC recovery warranted: {self.hevc_error_count} total HEVC errors detected")
+             return True
+
+         return False
+
+     def _handle_hevc_recovery(self, video_manager, worker_source_id):
+         try:
+             self.last_hevc_recovery = time.time()
+             logging.info(f"🔧 Attempting HEVC stream recovery for {worker_source_id}")
+             stream_url = video_manager.get_stream_url(worker_source_id)
+             if not stream_url:
+                 logging.error(f" Cannot get stream URL for {worker_source_id}")
+                 return False
+
+             video_manager.remove_stream(worker_source_id)
+             time.sleep(1.0)
+             video_manager.add_stream(worker_source_id, stream_url)
+             time.sleep(2.0)
+
+             if not video_manager.has_stream(worker_source_id):
+                 logging.error(f" Failed to recreate stream {worker_source_id}")
+                 return False
+
+             self.reset_frame_failure_counters()
+             self.hevc_error_count = 0
+             logging.info(f"✅ HEVC recovery attempt completed for {worker_source_id}")
+             return True
+         except Exception as e:
+             logging.error(f"❌ HEVC recovery failed for {worker_source_id}: {e}")
+             return False
+
      def stop(self):
          """Stops the Pipeline processor and cleans up resources."""
-         if not self.running:  # Prevent multiple stops
+         if not self.running:
              return
-
          logging.info("🛑 Stopping PipelineProcessor...")
          self.running = False
 
-         # Stop RTMP streamer first
          if hasattr(self, 'rtmp_streamer') and self.rtmp_streamer:
              try:
                  self.rtmp_streamer.stop_stream()
-                 self.rtmp_streamer = None
              except Exception as e:
                  logging.error(f"Error stopping RTMP streamer: {e}")
+             finally:
+                 self.rtmp_streamer = None
 
-         # Clear frame queue before joining thread
          try:
              while True:
                  try:
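The new _handle_frame_failure() above implements a tiered backoff: 0.01 s sleeps for the first 10 misses, 0.05 s up to 50, 0.1 s up to max_consecutive_failures (with periodic diagnostics and an optional HEVC recovery attempt), and a hard stop beyond that or once stream_recovery_timeout elapses. The tier selection alone can be sketched like this (hypothetical helper, not part of the package):

    def backoff_delay(consecutive_failures, max_failures=150):
        """Return the sleep used for a given failure streak, or None to stop the pipeline."""
        if consecutive_failures <= 10:
            return 0.01    # transient hiccup: poll quickly
        if consecutive_failures <= 50:
            return 0.05    # stream may be reconnecting
        if consecutive_failures <= max_failures:
            return 0.1     # persistent trouble: poll slowly, log diagnostics elsewhere
        return None        # give up and stop the pipeline

    assert backoff_delay(3) == 0.01
    assert backoff_delay(40) == 0.05
    assert backoff_delay(120) == 0.1
    assert backoff_delay(200) is None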
@@ -340,10 +493,9 @@ class PipelineProcessor:
          except Exception as e:
              logging.error(f"Error clearing frame queue: {e}")
 
-         # Wait for detection thread with increased timeout
          if self.detection_thread and self.detection_thread.is_alive():
              try:
-                 self.detection_thread.join(timeout=5.0)  # Increased timeout
+                 self.detection_thread.join(timeout=5.0)
                  if self.detection_thread.is_alive():
                      logging.warning("Detection thread did not terminate cleanly")
              except Exception as e:
@@ -351,10 +503,8 @@ class PipelineProcessor:
              finally:
                  self.detection_thread = None
 
-         # Clear tracked objects
-         self.tracked_objects_render.clear()  # Use clear() instead of reassignment
+         self.tracked_objects_render.clear()
 
-         # Close video debugger windows last
          try:
              if hasattr(self, 'video_debugger'):
                  self.video_debugger.close_all()
@@ -364,18 +514,41 @@ class PipelineProcessor:
          logging.info("✅ PipelineProcessor stopped successfully")
 
      def _get_detection_interval(self):
-         """
-         Get detection interval from configuration.
-         Converts frames per second to seconds per frame.
-         """
          config = self.config_manager.get_feature_config("processing_speed")
          fps = config.get("decimal", 1.0)
-
          if fps <= 0:
-             return 1 / 10  # Default to 10 frame per second if fps is zero or negative
-
-         return 1.0 / fps  # Convert fps to seconds per frame
+             return 1.0 / 10.0  # default 10 fps
+         return 1.0 / fps
 
      def enable_debug(self):
-         """Enable debug mode for this pipeline."""
-         self.debug_flag = True
+         self.debug_flag = True
+         self.consecutive_frame_failures = 0
+         self.last_successful_frame_time = time.time()
+
+     def reset_frame_failure_counters(self):
+         logging.info(f"🔄 Resetting frame failure counters for pipeline {self.pipeline_id}")
+         self.consecutive_frame_failures = 0
+         self.last_successful_frame_time = time.time()
+         self.hevc_error_count = 0
+
+     def get_hevc_diagnostics(self, video_manager) -> dict:
+         diagnostics = {
+             'hevc_error_count': self.hevc_error_count,
+             'last_hevc_recovery': self.last_hevc_recovery,
+             'time_since_last_recovery': time.time() - self.last_hevc_recovery,
+             'recovery_cooldown_remaining': max(0, self.hevc_recovery_cooldown - (time.time() - self.last_hevc_recovery)),
+             'consecutive_failures': self.consecutive_frame_failures,
+             'time_since_last_frame': time.time() - self.last_successful_frame_time,
+         }
+         if hasattr(video_manager, 'streams') and self.worker_source_id in video_manager.streams:
+             stream = video_manager.streams[self.worker_source_id]
+             if hasattr(stream, 'get_codec_info'):
+                 diagnostics['codec'] = stream.get_codec_info()
+             if hasattr(stream, 'get_recent_errors'):
+                 recent_errors = stream.get_recent_errors(max_age_seconds=300)
+                 hevc_errors = [err for err in recent_errors if
+                                'cu_qp_delta' in str(err.get('error', '')) or
+                                'Could not find ref with POC' in str(err.get('error', ''))]
+                 diagnostics['recent_hevc_errors'] = len(hevc_errors)
+                 diagnostics['total_recent_errors'] = len(recent_errors)
+         return diagnostics