nedo-vision-worker-core 0.3.1-py3-none-any.whl → 0.3.2-py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.

Potentially problematic release.

This version of nedo-vision-worker-core might be problematic.

@@ -7,7 +7,7 @@ A library for running AI vision processing and detection in the Nedo Vision plat
 from .core_service import CoreService
 from .callbacks import DetectionType, CallbackTrigger, DetectionData, IntervalMetadata
 
-__version__ = "0.3.1"
+__version__ = "0.3.2"
 __all__ = [
     "CoreService",
     "DetectionType",
@@ -19,7 +19,7 @@ def check_python_version():
     print("🐍 Checking Python version...")
 
     version = sys.version_info
-    min_version = (3, 8)
+    min_version = (3, 10)
 
     if version >= min_version:
         print(f" ✅ Python {version.major}.{version.minor}.{version.micro} (meets requirement >= {min_version[0]}.{min_version[1]})")
@@ -20,13 +20,6 @@ class PipelineProcessor:
     """Handles pipeline processing including preprocessing, AI model inference, tracking, and video stream processing."""
 
     def __init__(self, pipeline_id, worker_source_id, model, enable_visualization=True):
-        """
-        Initializes the PipelineProcessor with configurable detection labels.
-
-        :param model: The model to use for inference.
-        :param enable_visualization: Flag to enable visualization.
-        :param detection_labels: List of object labels to detect.
-        """
         self.running = True
         self.video_debugger = VideoDebugger(enable_visualization)
         self.tracker_manager = TrackerManager()
@@ -36,7 +29,9 @@ class PipelineProcessor:
         self.detection_processor = None
         self.threshold = 0.7
 
+        # Keep the latest frame for detection; size=1 and we overwrite when full
         self.frame_queue = queue.Queue(maxsize=1)
+
         self.tracked_objects_render = []
         self.detection_thread = None
         self.frame_counter = 0
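The new comment documents the intent of the maxsize=1 queue: a latest-only handoff between the capture loop and the detection thread. The producer (see the process_pipeline hunk below) overwrites a stale frame rather than blocking, and the consumer (see _detection_worker) drains to the newest entry. A self-contained sketch of the pattern, with illustrative names rather than the package's API:

import queue

latest = queue.Queue(maxsize=1)

def offer(frame):
    """Producer: never block; replace a stale frame if the slot is occupied."""
    try:
        latest.put_nowait(frame)
    except queue.Full:
        try:
            latest.get_nowait()  # drop the stale frame
        except queue.Empty:
            pass  # consumer took it first; the slot is free now
        try:
            latest.put_nowait(frame)
        except queue.Full:
            pass  # lost a race with another producer; skip this frame

def take_newest(timeout=1.0):
    """Consumer: wait for one frame, then drain to the most recent one."""
    frame = latest.get(timeout=timeout)
    try:
        while True:
            frame = latest.get_nowait()
    except queue.Empty:
        pass
    return frame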
@@ -67,25 +62,15 @@ class PipelineProcessor:
         self.hevc_recovery_cooldown = 30.0  # 30 seconds between HEVC recovery attempts
 
     def load_model(self, model):
-        """
-        Load a new AI model into the detection manager.
-        This allows runtime model updates without restarting the pipeline.
-
-        :param model: The new AI model to load
-        """
-        logging.info(f"🔄 Loading new model for pipeline {self.pipeline_id}: {model.name if model else 'None'}")
+        logging.info(f"🔄 Loading new model for pipeline {self.pipeline_id}: {getattr(model, 'name', None) or 'Unknown'}")
         self.detection_manager.load_model(model)
-
-        # Re-initialize detection processor to use the new model configuration
         self._update_detection_processor()
-
         logging.info(f"✅ Model updated for pipeline {self.pipeline_id}")
 
     def _get_detection_processor_code(self):
         for code in self.detection_processor_codes:
             if self.config_manager.is_feature_enabled(code):
                 return code
-
         return None
 
     def _get_detection_processor(self, code):
@@ -98,7 +83,6 @@ class PipelineProcessor:
 
     def _update_detection_processor(self):
         code = self._get_detection_processor_code()
-
         if self.detection_processor and self.detection_processor.code == code:
             return
 
@@ -118,19 +102,22 @@ class PipelineProcessor:
                 exclusive_attribute_groups=self.detection_processor.exclusive_labels,
                 multi_instance_classes=multi_instance_classes
             )
-
+        else:
+            # Reset drawer/tracker when no processor enabled
+            self.frame_drawer.update_config()
+            self.tracker_manager.update_config([], [], [])
+
     def _update_config(self):
         self.config_manager.update(self.pipeline_id)
         self.preprocessor.update(self.config_manager)
         self.detection_interval = self._get_detection_interval()
         self._update_detection_processor()
 
-        # Reset frame failure counters on config update
+        # Reset failure counters on config update
         self.consecutive_frame_failures = 0
         self.last_successful_frame_time = time.time()
 
         ai_model = self.detection_manager.model_metadata
-
         if self.detection_processor:
             config = self.config_manager.get_feature_config(self.detection_processor.code)
             self.detection_processor.update(self.config_manager, ai_model)
@@ -140,25 +127,13 @@ class PipelineProcessor:
             self.frame_drawer.polygons = [((0, 0, 255), p) for p in self.detection_processor.restricted_areas]
         else:
             self.threshold = 0.7
-            self.frame_drawer.update_config()
-            self.tracker_manager.update_config(
-                attribute_labels=[],
-                exclusive_attribute_groups=[],
-                multi_instance_classes=[]
-            )
 
     def process_pipeline(self, video_manager: VideoStreamManager):
-        """
-        Runs the full pipeline processing including preprocessing, detection and tracking.
-        """
         pipeline_id = self.pipeline_id
         worker_source_id = self.worker_source_id
-
         logging.info(f"🎯 Running pipeline processing for pipeline {pipeline_id} | Source: {worker_source_id}")
 
         self._update_config()
-
-        # Reset failure counters at start
         self.consecutive_frame_failures = 0
         self.last_successful_frame_time = time.time()
 
@@ -167,14 +142,15 @@ class PipelineProcessor:
             logging.error(f"❌ Pipeline {pipeline_id} | Source {worker_source_id}: No initial frame available. Exiting...")
             return
 
+        # Start RTMP (internal pacer thread will begin on first push_frame())
         self.rtmp_streamer = RTMPStreamer(pipeline_id)
 
-        # Start detection in a separate thread
+        # Start detection thread
         self.detection_thread = threading.Thread(
             target=self._detection_worker,
-            name=f"detection-{pipeline_id}"
+            name=f"detection-{pipeline_id}",
+            daemon=True
         )
-        self.detection_thread.daemon = True
         self.detection_thread.start()
 
         try:
@@ -184,62 +160,86 @@ class PipelineProcessor:
                 if frame is None:
                     if not self._handle_frame_failure(video_manager, worker_source_id):
                         break
+                    # no frame this tick—just continue (the streamer will repeat last good frame)
                     continue
-
-                # Reset failure counters on successful frame
+
+                # cv2.imshow("AA", frame)
+                # cv2.waitKey(1)
+                # continue
+
+                # successful frame
                 self.consecutive_frame_failures = 0
                 self.last_successful_frame_time = time.time()
-
                 self.frame_counter += 1
 
-                self.frame_drawer.draw_polygons(frame)
-                drawn_frame = self.frame_drawer.draw_frame(
-                    frame.copy(),
-                    self.tracked_objects_render,
-                    with_trails=True,
-                    trail_length=int(2 / self.detection_interval)
-                )
+                # draw annotations
+                try:
+                    self.frame_drawer.draw_polygons(frame)
+                    drawn_frame = self.frame_drawer.draw_frame(
+                        frame.copy(),
+                        self.tracked_objects_render,
+                        with_trails=True,
+                        trail_length=int(max(1, 2 / self.detection_interval))
+                    )
+                except Exception as e:
+                    logging.error(f"❌ Draw failed, using raw frame: {e}")
+                    drawn_frame = frame
 
+                # debug snapshot if requested
                 if self.debug_flag:
                     tracked_objects_render = self._process_frame(frame)
-
-                    self.debug_repo.update_debug_entries_by_pipeline_id(
-                        self.pipeline_id,
-                        self.frame_drawer.draw_frame(
-                            frame.copy(),
+                    try:
+                        self.debug_repo.update_debug_entries_by_pipeline_id(
+                            self.pipeline_id,
+                            self.frame_drawer.draw_frame(frame.copy(), tracked_objects_render),
                             tracked_objects_render
-                        ),
-                        tracked_objects_render
-                    )
+                        )
+                    except Exception as e:
+                        logging.warning(f"Debug save failed: {e}")
                     self.debug_flag = False
 
-                # Check RTMP streamer status before sending frame
-                if self.rtmp_streamer:
+                # ---- RTMP push (latest-only; pacing handled inside RTMPStreamer) ----
+                if self.rtmp_streamer is None or not self.rtmp_streamer.is_active():
+                    # (re)create; it will learn WxH on first push
+                    self.rtmp_streamer = RTMPStreamer(self.pipeline_id)
+
+                try:
+                    self.rtmp_streamer.push_frame(drawn_frame)
+                except Exception as e:
+                    logging.error(f"❌ RTMP push error: {e}")
                     try:
-                        self.rtmp_streamer.send_frame(drawn_frame)
-                    except Exception as e:
-                        logging.error(f"❌ RTMP streaming error: {e}")
-                        # Stop RTMP streamer on error
                         self.rtmp_streamer.stop_stream()
-                        self.rtmp_streamer = None
+                    except Exception:
+                        pass
+                    self.rtmp_streamer = None
 
-                # Only put frame in queue if detection thread is still running
+                # feed detection worker with latest-only behavior
                 if self.detection_thread and self.detection_thread.is_alive():
-                    if not self.frame_queue.full():
-                        self.frame_queue.put(frame, block=False)
-
+                    try:
+                        self.frame_queue.put_nowait(frame)
+                    except queue.Full:
+                        try:
+                            _ = self.frame_queue.get_nowait()
+                        except queue.Empty:
+                            pass
+                        try:
+                            self.frame_queue.put_nowait(frame)
+                        except queue.Full:
+                            pass
+
+                # visualize
                 try:
                     self.video_debugger.show_frame(pipeline_id, worker_source_id, drawn_frame)
                 except Exception as e:
                     logging.error(f"⚠️ Failed to render frame for pipeline {pipeline_id}: {e}")
+
+                time.sleep(0.1)
 
-                time.sleep(0.01)
         except Exception as e:
             logging.error(f"❌ Error in pipeline {pipeline_id}: {e}", exc_info=True)
 
     def _process_frame(self, frame):
         dimension = frame.shape[:2]
-
         processed_frame = self.preprocessor.apply(frame)
 
         class_thresholds = {}
@@ -253,7 +253,7 @@ class PipelineProcessor:
         if main_threshold and ai_model and ai_model.get_main_class():
             class_thresholds[ai_model.get_main_class()] = main_threshold
 
-        detections = self.detection_manager.detect_objects(processed_frame, self.threshold, class_thresholds)
+        detections = self.detection_manager.detect_objects(processed_frame, self.threshold, class_thresholds)
         detections = self.preprocessor.revert_detections_bboxes(detections, dimension)
 
         if self.detection_processor:
@@ -262,17 +262,13 @@ class PipelineProcessor:
         else:
             return self.tracker_manager.track_objects(detections)
 
-
     def _detection_worker(self):
-        """
-        Runs detection in a separate thread and updates configuration periodically.
-        Applies preprocessing based on pipeline configuration.
-        """
+        """Runs detection in a separate thread and updates configuration periodically."""
        pipeline_id = self.pipeline_id
         worker_source_id = self.worker_source_id
         last_detection_time = time.time()
-        last_config_update_time = time.time()
-        config_update_interval = 5  # Update configuration every 5 seconds
+        last_config_update_time = time.time()
+        config_update_interval = 5  # seconds
 
         while self.running:
             try:
@@ -282,16 +278,22 @@ class PipelineProcessor:
                 # Update config periodically
                 if (current_time - last_config_update_time) >= config_update_interval:
                     self._update_config()
-                    last_config_update_time = current_time
+                    last_config_update_time = current_time
                     logging.info(f"🔄 Updated pipeline config for {pipeline_id}")
 
-                # Process detection only if enough time has passed since last detection
-                # detection_interval is the time in seconds between consecutive detections
+                # Keep only the latest frame if we fell behind
+                try:
+                    while True:
+                        newer = self.frame_queue.get_nowait()
+                        frame = newer
+                except queue.Empty:
+                    pass
+
+                # Respect detection interval
                 if (current_time - last_detection_time) < self.detection_interval:
                     continue
+                last_detection_time = current_time
 
-                last_detection_time = current_time
-
                 if self.detection_processor is None or frame is None or frame.size == 0:
                     self.tracked_objects_render = []
                     continue
@@ -323,79 +325,58 @@ class PipelineProcessor:
                 logging.error(f"❌ Error in detection thread for pipeline {pipeline_id}: {e}", exc_info=True)
 
     def _wait_for_frame(self, video_manager, max_retries=10, sleep_time=3):
-        """Waits until a frame is available from the video source."""
         logging.info(f"⏳ Waiting for initial frame from {self.worker_source_id}...")
-
         for retry_count in range(max_retries):
             frame = video_manager.get_frame(self.worker_source_id)
             if frame is not None:
                 logging.info(f"✅ Initial frame received from {self.worker_source_id}")
                 return frame
 
-            # Check if stream exists
             if not video_manager.has_stream(self.worker_source_id):
                 logging.error(f"❌ Stream {self.worker_source_id} not found in video manager")
                 return None
 
             logging.warning(f"⚠️ Waiting for video stream {self.worker_source_id} (Attempt {retry_count + 1}/{max_retries})...")
-
-            # Log stream diagnostics on later attempts
             if retry_count >= 3:
                 self._log_stream_diagnostics(video_manager, self.worker_source_id)
-
             time.sleep(sleep_time)
 
         logging.error(f"❌ Failed to get initial frame from {self.worker_source_id} after {max_retries} attempts")
         return None
 
     def _handle_frame_failure(self, video_manager, worker_source_id):
-        """
-        Handle frame retrieval failures with progressive backoff and recovery attempts.
-        Returns False if pipeline should stop, True to continue.
-        """
+        """Handle frame retrieval failures with progressive backoff and recovery attempts."""
         self.consecutive_frame_failures += 1
 
-        # Check if stream was removed
         if not video_manager.has_stream(worker_source_id):
             logging.info(f"🛑 Stream {worker_source_id} was removed, stopping pipeline")
             return False
 
-        # Check for stream recovery timeout
         time_since_last_frame = time.time() - self.last_successful_frame_time
         if time_since_last_frame > self.stream_recovery_timeout:
             logging.error(f"❌ Stream {worker_source_id} recovery timeout ({self.stream_recovery_timeout}s). Stopping pipeline.")
             return False
 
-        # Progressive logging and backoff
         if self.consecutive_frame_failures <= 10:
-            # First 10 failures: minimal logging, fast retry
-            if self.consecutive_frame_failures % 5 == 1:  # Log every 5th failure
-                logging.debug(f"⚠️ No frame available for {worker_source_id} (attempt {self.consecutive_frame_failures})")
+            if self.consecutive_frame_failures % 5 == 1:
+                logging.debug(f"⚠️ No frame for {worker_source_id} (attempt {self.consecutive_frame_failures})")
             time.sleep(0.01)
         elif self.consecutive_frame_failures <= 50:
-            # 11-50 failures: moderate logging, slightly longer wait
-            if self.consecutive_frame_failures % 10 == 1:  # Log every 10th failure
-                logging.warning(f"⚠️ No frame available for {worker_source_id} (attempt {self.consecutive_frame_failures}). Stream may be reconnecting...")
+            if self.consecutive_frame_failures % 10 == 1:
+                logging.warning(f"⚠️ No frame for {worker_source_id} (attempt {self.consecutive_frame_failures}). Stream may be reconnecting...")
            time.sleep(0.05)
         elif self.consecutive_frame_failures <= self.max_consecutive_failures:
-            # 51-150 failures: more frequent logging, longer wait
-            if self.consecutive_frame_failures % 20 == 1:  # Log every 20th failure
+            if self.consecutive_frame_failures % 20 == 1:
                 logging.warning(f"⚠️ Persistent frame issues for {worker_source_id} (attempt {self.consecutive_frame_failures}). Checking stream health...")
                 self._log_stream_diagnostics(video_manager, worker_source_id)
-
-            # Attempt HEVC recovery on severe persistent failures (every 60 failures to avoid too frequent reconnections)
             if self.consecutive_frame_failures % 60 == 1:
-                # Check if we should attempt HEVC recovery based on error patterns and cooldown
                 if self._should_attempt_hevc_recovery(video_manager, worker_source_id):
-                    logging.info(f"🔧 Attempting HEVC-specific recovery for persistent frame failures...")
-                    recovery_success = self._handle_hevc_recovery(video_manager, worker_source_id)
-                    if recovery_success:
-                        logging.info(f"✅ HEVC recovery successful, continuing pipeline...")
-                        return True  # Continue processing after successful recovery
-
+                    logging.info("🔧 Attempting HEVC-specific recovery for persistent frame failures...")
+                    if self._handle_hevc_recovery(video_manager, worker_source_id):
+                        logging.info("✅ HEVC recovery successful, continuing pipeline...")
+                        return True
             time.sleep(0.1)
         else:
-            # Over max failures: critical logging and stop
             logging.error(f"❌ Too many consecutive frame failures for {worker_source_id} ({self.consecutive_frame_failures}). Stopping pipeline.")
             self._log_stream_diagnostics(video_manager, worker_source_id)
             return False
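The cleaned-up handler keeps the original escalation: failures 1-10 retry after 10 ms with occasional debug logs, 11-50 wait 50 ms with periodic warnings, 51 up to max_consecutive_failures wait 100 ms with diagnostics and an HEVC recovery attempt every 60th failure, and anything beyond that stops the pipeline. A condensed sketch of the same schedule (the 150 ceiling is taken from the "51-150 failures" comment removed in this release, so treat it as an assumption):

def backoff(failures, max_failures=150):
    """Map a consecutive-failure count to (sleep_seconds, keep_running)."""
    if failures <= 10:
        return 0.01, True   # transient hiccup: retry almost immediately
    if failures <= 50:
        return 0.05, True   # stream may be reconnecting
    if failures <= max_failures:
        return 0.10, True   # persistent: log diagnostics, try recovery
    return 0.0, False       # give up and stop the pipeline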
@@ -403,12 +384,9 @@ class PipelineProcessor:
 
         return True
 
     def _log_stream_diagnostics(self, video_manager, worker_source_id):
-        """Log diagnostic information about the stream state."""
         try:
             stream_url = video_manager.get_stream_url(worker_source_id)
             is_file = video_manager.is_video_file(worker_source_id)
-
-            # Get stream object for more detailed diagnostics
             if hasattr(video_manager, 'streams') and worker_source_id in video_manager.streams:
                 stream = video_manager.streams[worker_source_id]
                 state = stream.get_state() if hasattr(stream, 'get_state') else "unknown"
@@ -421,57 +399,44 @@ class PipelineProcessor:
                 logging.info(f"   Connected: {is_connected}")
                 logging.info(f"   Time since last frame: {time.time() - self.last_successful_frame_time:.1f}s")
 
-                # Check for HEVC/codec specific issues
                 if hasattr(stream, 'get_codec_info'):
                     codec_info = stream.get_codec_info()
                     if codec_info:
                         logging.info(f"   Codec: {codec_info}")
                         if 'hevc' in str(codec_info).lower() or 'h265' in str(codec_info).lower():
-                            logging.warning(f"   ⚠️ HEVC stream detected - may experience QP delta or POC reference errors")
+                            logging.warning("   ⚠️ HEVC stream detected - potential QP/POC errors")
 
-                # Log recent error patterns if available
                 if hasattr(stream, 'get_recent_errors'):
                     recent_errors = stream.get_recent_errors()
                     if recent_errors:
                         hevc_errors = [err for err in recent_errors if 'cu_qp_delta' in str(err.get('error', '')) or 'Could not find ref with POC' in str(err.get('error', ''))]
                         if hevc_errors:
-                            logging.warning(f"   🔥 Recent HEVC errors detected: {len(hevc_errors)} codec-related errors")
+                            logging.warning(f"   🔥 Recent HEVC errors: {len(hevc_errors)}")
                             self.hevc_error_count += len(hevc_errors)
-
-                            # Log sample of recent HEVC errors for debugging
-                            for i, err in enumerate(hevc_errors[-3:]):  # Show last 3 errors
+                            for i, err in enumerate(hevc_errors[-3:]):
                                 logging.warning(f"   🔥 HEVC Error {i+1}: {err.get('error', '')[:100]}...")
             else:
-                logging.info(f"📊 Stream {worker_source_id} not found in regular streams, checking direct device streams...")
-
+                logging.info(f"📊 Stream {worker_source_id} not found in registry; checking device directly...")
         except Exception as e:
             logging.error(f"Error getting stream diagnostics: {e}")
 
     def _should_attempt_hevc_recovery(self, video_manager, worker_source_id) -> bool:
-        """
-        Determine if HEVC recovery should be attempted based on error patterns and cooldown.
-        """
         current_time = time.time()
-
-        # Check cooldown period
         if current_time - self.last_hevc_recovery < self.hevc_recovery_cooldown:
             logging.debug(f"HEVC recovery on cooldown ({current_time - self.last_hevc_recovery:.1f}s elapsed)")
             return False
 
-        # Check if stream has HEVC-related errors
         if hasattr(video_manager, 'streams') and worker_source_id in video_manager.streams:
             stream = video_manager.streams[worker_source_id]
             if hasattr(stream, 'get_recent_errors'):
-                recent_errors = stream.get_recent_errors(max_age_seconds=60)  # Last minute
+                recent_errors = stream.get_recent_errors(max_age_seconds=60)
                 hevc_errors = [err for err in recent_errors if
-                               'cu_qp_delta' in str(err.get('error', '')) or
-                               'Could not find ref with POC' in str(err.get('error', ''))]
-
-                if len(hevc_errors) >= 3:  # Threshold for HEVC errors
+                               'cu_qp_delta' in str(err.get('error', '')) or
+                               'Could not find ref with POC' in str(err.get('error', ''))]
+                if len(hevc_errors) >= 3:
                     logging.info(f"HEVC recovery warranted: {len(hevc_errors)} HEVC errors in last minute")
                     return True
 
-        # Check if we have accumulated enough general HEVC errors
         if self.hevc_error_count >= 5:
             logging.info(f"HEVC recovery warranted: {self.hevc_error_count} total HEVC errors detected")
             return True
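The predicate that classifies an entry as an HEVC decoder error ('cu_qp_delta' or 'Could not find ref with POC' in the error text) now appears verbatim in _log_stream_diagnostics, _should_attempt_hevc_recovery, and get_hevc_diagnostics. A sketch of factoring it into a single helper, assuming error records are dicts with an 'error' key as the diff implies:

def is_hevc_error(err: dict) -> bool:
    """True if an error record looks like an HEVC decode failure."""
    text = str(err.get('error', ''))
    return 'cu_qp_delta' in text or 'Could not find ref with POC' in text

# Usage: hevc_errors = [err for err in recent_errors if is_hevc_error(err)]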
@@ -479,62 +444,46 @@ class PipelineProcessor:
         return False
 
     def _handle_hevc_recovery(self, video_manager, worker_source_id):
-        """
-        Handle HEVC-specific recovery strategies for codec errors.
-        This method attempts to recover from common HEVC issues like QP delta and POC reference errors.
-        """
         try:
-            self.last_hevc_recovery = time.time()  # Update recovery timestamp
+            self.last_hevc_recovery = time.time()
             logging.info(f"🔧 Attempting HEVC stream recovery for {worker_source_id}")
-
-            # Get the stream URL for recreation
             stream_url = video_manager.get_stream_url(worker_source_id)
             if not stream_url:
                 logging.error(f"   Cannot get stream URL for {worker_source_id}")
                 return False
 
-            # Strategy 1: Remove and re-add the stream to reset decoder state
-            logging.info(f"   Recreating stream {worker_source_id} to reset decoder state...")
             video_manager.remove_stream(worker_source_id)
-            time.sleep(1.0)  # Give time for cleanup
-
-            # Re-add the stream
+            time.sleep(1.0)
             video_manager.add_stream(worker_source_id, stream_url)
-            time.sleep(2.0)  # Give time for stream to initialize
+            time.sleep(2.0)
 
-            # Strategy 2: Check if stream was successfully recreated
             if not video_manager.has_stream(worker_source_id):
                 logging.error(f"   Failed to recreate stream {worker_source_id}")
                 return False
 
-            # Strategy 3: Reset failure counters and error counts after recovery attempt
             self.reset_frame_failure_counters()
-            self.hevc_error_count = 0  # Reset HEVC error counter
-
+            self.hevc_error_count = 0
             logging.info(f"✅ HEVC recovery attempt completed for {worker_source_id}")
             return True
-
         except Exception as e:
             logging.error(f"❌ HEVC recovery failed for {worker_source_id}: {e}")
             return False
 
     def stop(self):
         """Stops the Pipeline processor and cleans up resources."""
-        if not self.running:  # Prevent multiple stops
+        if not self.running:
             return
-
         logging.info("🛑 Stopping PipelineProcessor...")
         self.running = False
 
-        # Stop RTMP streamer first
         if hasattr(self, 'rtmp_streamer') and self.rtmp_streamer:
             try:
                 self.rtmp_streamer.stop_stream()
-                self.rtmp_streamer = None
             except Exception as e:
                 logging.error(f"Error stopping RTMP streamer: {e}")
+            finally:
+                self.rtmp_streamer = None
 
-        # Clear frame queue before joining thread
         try:
             while True:
                 try:
@@ -544,10 +493,9 @@ class PipelineProcessor:
         except Exception as e:
             logging.error(f"Error clearing frame queue: {e}")
 
-        # Wait for detection thread with increased timeout
         if self.detection_thread and self.detection_thread.is_alive():
             try:
-                self.detection_thread.join(timeout=5.0)  # Increased timeout
+                self.detection_thread.join(timeout=5.0)
                 if self.detection_thread.is_alive():
                     logging.warning("Detection thread did not terminate cleanly")
             except Exception as e:
@@ -555,10 +503,8 @@ class PipelineProcessor:
         finally:
             self.detection_thread = None
 
-        # Clear tracked objects
-        self.tracked_objects_render.clear()  # Use clear() instead of reassignment
+        self.tracked_objects_render.clear()
 
-        # Close video debugger windows last
         try:
             if hasattr(self, 'video_debugger'):
                 self.video_debugger.close_all()
@@ -568,34 +514,24 @@ class PipelineProcessor:
         logging.info("✅ PipelineProcessor stopped successfully")
 
     def _get_detection_interval(self):
-        """
-        Get detection interval from configuration.
-        Converts frames per second to seconds per frame.
-        """
         config = self.config_manager.get_feature_config("processing_speed")
         fps = config.get("decimal", 1.0)
-
         if fps <= 0:
-            return 1 / 10  # Default to 10 frame per second if fps is zero or negative
-
-        return 1.0 / fps  # Convert fps to seconds per frame
+            return 1.0 / 10.0  # default 10 fps
+        return 1.0 / fps
 
     def enable_debug(self):
-        """Enable debug mode for this pipeline."""
         self.debug_flag = True
-        # Reset failure counters when debug is enabled as it may help with recovery
         self.consecutive_frame_failures = 0
         self.last_successful_frame_time = time.time()
 
     def reset_frame_failure_counters(self):
-        """Reset frame failure counters. Can be called externally to help with recovery."""
         logging.info(f"🔄 Resetting frame failure counters for pipeline {self.pipeline_id}")
         self.consecutive_frame_failures = 0
         self.last_successful_frame_time = time.time()
-        self.hevc_error_count = 0  # Also reset HEVC error count
+        self.hevc_error_count = 0
 
     def get_hevc_diagnostics(self, video_manager) -> dict:
-        """Get HEVC-specific diagnostics for the pipeline."""
         diagnostics = {
             'hevc_error_count': self.hevc_error_count,
             'last_hevc_recovery': self.last_hevc_recovery,
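The simplified _get_detection_interval in the hunk above converts the configured rate (detections per second) into a sleep interval in seconds, falling back to 10 fps for non-positive values. A standalone worked example of the conversion:

def detection_interval(fps: float) -> float:
    """Seconds between detections for a given rate; defaults to 10 fps."""
    if fps <= 0:
        return 1.0 / 10.0
    return 1.0 / fps

assert detection_interval(2.5) == 0.4  # 2.5 detections/s -> one every 0.4 s
assert detection_interval(0) == 0.1    # invalid rate falls back to 10 fps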
@@ -604,20 +540,15 @@ class PipelineProcessor:
             'consecutive_failures': self.consecutive_frame_failures,
             'time_since_last_frame': time.time() - self.last_successful_frame_time,
         }
-
-        # Add stream-specific HEVC information
         if hasattr(video_manager, 'streams') and self.worker_source_id in video_manager.streams:
             stream = video_manager.streams[self.worker_source_id]
-
             if hasattr(stream, 'get_codec_info'):
                 diagnostics['codec'] = stream.get_codec_info()
-
             if hasattr(stream, 'get_recent_errors'):
-                recent_errors = stream.get_recent_errors(max_age_seconds=300)  # Last 5 minutes
+                recent_errors = stream.get_recent_errors(max_age_seconds=300)
                 hevc_errors = [err for err in recent_errors if
-                               'cu_qp_delta' in str(err.get('error', '')) or
-                               'Could not find ref with POC' in str(err.get('error', ''))]
+                               'cu_qp_delta' in str(err.get('error', '')) or
+                               'Could not find ref with POC' in str(err.get('error', ''))]
                 diagnostics['recent_hevc_errors'] = len(hevc_errors)
                 diagnostics['total_recent_errors'] = len(recent_errors)
-
         return diagnostics