matrice-analytics 0.1.44__py3-none-any.whl → 0.1.46__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of matrice-analytics might be problematic.

@@ -143,4 +143,4 @@ def get_usecase_from_app_name(app_name: str) -> str:
 
 def get_category_from_app_name(app_name: str) -> str:
     normalized_app_name = app_name.lower().replace(" ", "_").replace("-", "_")
-    return APP_NAME_TO_CATEGORY.get(app_name, APP_NAME_TO_CATEGORY.get(normalized_app_name))
+    return APP_NAME_TO_CATEGORY.get(app_name, APP_NAME_TO_CATEGORY.get(normalized_app_name))
@@ -24,7 +24,7 @@ subprocess.run(
     cmd,
     stdout=log_file,
     stderr=subprocess.STDOUT,
-    preexec_fn=os.setpgrp
+    #preexec_fn=os.setpgrp
 )
 log_file.close()
 
@@ -635,7 +635,9 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
             )
             self.logger.debug("Applied category filtering")
 
-
+        print("------------------TILL TRACKER MS----------------------------")
+        print("LATENCY:",(time.time() - processing_start)*1000,"| Throughput fps:",(1.0 / (time.time() - processing_start)) if (time.time() - processing_start) > 0 else None)
+        print("------------------TILL TRACKER MS----------------------------")
         # Advanced tracking (BYTETracker-like) - only if enabled
         if config.enable_face_tracking:
             from ..advanced_tracker import AdvancedTracker
@@ -675,6 +677,10 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
         recognized_persons = {}
         current_frame_staff_details = {}
 
+        print("------------------TRACKER INIT END----------------------------")
+        print("LATENCY:",(time.time() - processing_start)*1000,"| Throughput fps:",(1.0 / (time.time() - processing_start)) if (time.time() - processing_start) > 0 else None)
+        print("------------------TRACKER INIT END----------------------------")
+
         # Process face recognition for each detection (if enabled)
         if config.enable_face_recognition:
             face_recognition_result = await self._process_face_recognition(
@@ -689,6 +695,10 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
                 detection["recognition_status"] = "disabled"
                 detection["enrolled"] = False
 
+        print("------------------FACE RECONG CONFIG ENABLED----------------------------")
+        print("LATENCY:",(time.time() - processing_start)*1000,"| Throughput fps:",(1.0 / (time.time() - processing_start)) if (time.time() - processing_start) > 0 else None)
+        print("------------------FACE RECONG CONFIG ENABLED----------------------------")
+
         # Update tracking state for total count per label
         self._update_tracking_state(processed_data)
 
@@ -721,6 +731,10 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
             current_recognized_count, current_unknown_count, recognized_persons
         ))
 
+        print("------------------TILL FACE RECOG SUMMARY----------------------------")
+        print("LATENCY:",(time.time() - processing_start)*1000,"| Throughput fps:",(1.0 / (time.time() - processing_start)) if (time.time() - processing_start) > 0 else None)
+        print("------------------TILL FACE RECOG SUMMARY----------------------------")
+
         # Add detections to the counting summary (standard pattern for detection use cases)
         # Ensure display label is present for UI (does not affect logic/counters)
         for _d in processed_data:
@@ -751,6 +765,12 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
             business_analytics_list[0] if business_analytics_list else {}
         )
         summary = summary_list[0] if summary_list else {}
+
+        print("------------------TILL TRACKING STATS----------------------------")
+        print("LATENCY:",(time.time() - processing_start)*1000,"| Throughput fps:",(1.0 / (time.time() - processing_start)) if (time.time() - processing_start) > 0 else None)
+        print("------------------TILL TRACKING STATS----------------------------")
+
+
         agg_summary = {
             str(frame_number): {
                 "incidents": incidents,
@@ -863,6 +883,7 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
         """Process face recognition for each detection with embeddings"""
 
         # Initialize face client only when needed and if credentials are available
+        st=time.time()
         if not self.face_client:
             try:
                 self.face_client = self._get_facial_recognition_client(config)
@@ -872,6 +893,9 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
                 )
                 # No client available, return empty list (no results)
                 return []
+        print("------------------FACE RECOG CLIENT INIT----------------------------")
+        print("LATENCY:",(time.time() - st)*1000,"| Throughput fps:",(1.0 / (time.time() - st)) if (time.time() - st) > 0 else None)
+        print("------------------FACE RECOG CLIENT INIT----------------------------")
 
         # Initialize unknown faces storage if not exists
         if not hasattr(self, "unknown_faces_storage"):
@@ -914,11 +938,16 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
         for detection in detections:
 
             # Process each detection sequentially with await to preserve order
+            st=time.time()
             processed_detection = await self._process_face(
                 detection, current_frame, location, current_timestamp, config,
                 current_recognized_count, current_unknown_count,
                 recognized_persons, current_frame_staff_details
             )
+            print("------------------WHOLE FACE RECOG PROCESSING DETECTION----------------------------")
+            print("LATENCY:",(time.time() - st)*1000,"| Throughput fps:",(1.0 / (time.time() - st)) if (time.time() - st) > 0 else None)
+            print("------------------FACE RECOG PROCESSING DETECTION----------------------------")
+
             # Include both known and unknown faces in final detections (maintains original order)
             if processed_detection:
                 final_detections.append(processed_detection)
@@ -948,12 +977,16 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
     ) -> Dict:
 
         # Extract and validate embedding using EmbeddingManager
+        st=time.time()
         detection, embedding = self.embedding_manager.extract_embedding_from_detection(detection)
         if not embedding:
             return None
 
         # Internal tracker-provided ID (from AdvancedTracker; ignore upstream IDs entirely)
         track_id = detection.get("track_id")
+        print("------------------FACE RECOG EMBEDDING EXTRACTION----------------------------")
+        print("LATENCY:",(time.time() - st)*1000,"| Throughput fps:",(1.0 / (time.time() - st)) if (time.time() - st) > 0 else None)
+        print("------------------FACE RECOG EMBEDDING EXTRACTION----------------------------")
 
         # Determine if detection is eligible for recognition (similar to compare_similarity gating)
         bbox = detection.get("bounding_box", {}) or {}
@@ -1016,6 +1049,7 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
             except Exception:
                 pass
         else: #if eligible for recognition
+            st=time.time()
             staff_id, person_name, similarity_score, employee_id, staff_details, detection_type = await self.temporal_identity_manager.update(
                 track_id=track_key,
                 emb=embedding,
@@ -1023,6 +1057,9 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
                 location=location,
                 timestamp=current_timestamp,
             )
+            print("------------------FACE RECOG TEMPORAL IDENTITY MANAGER UPDATE----------------------------")
+            print("LATENCY:",(time.time() - st)*1000,"| Throughput fps:",(1.0 / (time.time() - st)) if (time.time() - st) > 0 else None)
+            print("------------------FACE RECOG TEMPORAL IDENTITY MANAGER UPDATE----------------------------")
         except Exception as e:
             self.logger.warning(f"TemporalIdentityManager update failed: {e}")
 
@@ -1096,14 +1133,22 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
                 and employee_id
                 and not str(employee_id).startswith("unknown_")
             ):
+                st=time.time()
                 await self.people_activity_logging.enqueue_detection(
                     detection=detection,
                     current_frame=current_frame,
                     location=location,
                 )
+                print("------------------FACE RECOG ENQUEUEING DETECTION FOR ACTIVITY LOGGING----------------------------")
+                print("LATENCY:",(time.time() - st)*1000,"| Throughput fps:",(1.0 / (time.time() - st)) if (time.time() - st) > 0 else None)
+                print("------------------FACE RECOG ENQUEUEING DETECTION FOR ACTIVITY LOGGING----------------------------")
+
                 self.logger.debug(f"Enqueued known face detection for activity logging: {detection.get('person_name', 'Unknown')}")
         except Exception as e:
             self.logger.error(f"Error enqueueing detection for activity logging: {e}")
+            print("------------------FACE RECOG ENQUEUEING DETECTION FOR ACTIVITY LOGGING----------------------------")
+            print("LATENCY:",(time.time() - st)*1000,"| Throughput fps:",(1.0 / (time.time() - st)) if (time.time() - st) > 0 else None)
+            print("------------------FACE RECOG ENQUEUEING DETECTION FOR ACTIVITY LOGGING----------------------------")
 
         return detection
 
@@ -587,4 +587,4 @@ def create_face_client(account_number: str = None, access_key: str = None,
         project_id=project_id,
         server_id=server_id,
         session=session
-    )
+    )
@@ -318,4 +318,4 @@ class PeopleActivityLogging:
         try:
             self.stop_background_processing()
         except:
-            pass
+            pass
@@ -1,6 +1,7 @@
 import easyocr
 import numpy as np
 import torch
+from matrice_common.utils import log_errors
 
 class EasyOCRExtractor:
     def __init__(self, lang=['en', 'hi', 'ar'], gpu=False, model_storage_directory=None,
@@ -30,7 +31,8 @@ class EasyOCRExtractor:
         self.recognizer = recognizer
         self.verbose = verbose
         self.reader = None
-
+
+    @log_errors(default_return=None, raise_exception=True, service_name="py_analytics", log_error=True)
     def setup(self):
         """
         Initializes the EasyOCR reader if not already initialized.
@@ -0,0 +1,165 @@
+import os
+import sys
+import cv2
+import json
+import importlib
+import argparse
+from ultralytics import YOLO
+from src.matrice_analytics.post_processing.core.base import ProcessingContext
+
+
+class UseCaseTestProcessor:
+    """
+    A flexible YOLO-based video processor for testing different post-processing use cases.
+    """
+
+    def __init__(self, file_name, config_name, usecase_name, model_path, video_path, post_process=None, max_frames=None):
+        self.file_name = file_name
+        self.config_name = config_name
+        self.usecase_name = usecase_name
+        self.model_path = model_path
+        self.video_path = video_path
+        self.post_process = post_process
+        self.max_frames = max_frames
+        self.json_dir = "jsons"
+
+        self._setup_environment()
+        self.ConfigClass, self.UsecaseClass = self._load_usecase()
+        self.config = self._initialize_config()
+        self.processor = self.UsecaseClass()
+        self.model = YOLO(self.model_path)
+        os.makedirs(self.json_dir, exist_ok=True)
+
+    def _setup_environment(self):
+        """Ensure project root is added to sys.path."""
+        project_root = os.path.abspath("/content/py_analytics")
+        if project_root not in sys.path:
+            sys.path.append(project_root)
+
+    def _load_usecase(self):
+        """Dynamically import config and usecase classes."""
+        module_path = f"src.matrice_analytics.post_processing.usecases.{self.file_name}"
+        module = importlib.import_module(module_path)
+        return getattr(module, self.config_name), getattr(module, self.usecase_name)
+
+    def _initialize_config(self):
+        """Initialize config object, applying overrides if provided."""
+        if self.post_process:
+            return self.ConfigClass(**self.post_process)
+        return self.ConfigClass()
+
+    def _serialize_result(self, result):
+        """Convert result object into JSON-serializable dict."""
+        def to_serializable(obj):
+            if hasattr(obj, "to_dict"):
+                return obj.to_dict()
+            if hasattr(obj, "__dict__"):
+                return obj.__dict__
+            return str(obj)
+        return json.loads(json.dumps(result, default=to_serializable))
+
+
+    def process_video(self):
+        """Run YOLO inference on video and post-process frame by frame."""
+        cap = cv2.VideoCapture(self.video_path)
+        if not cap.isOpened():
+            raise ValueError(f"Failed to open video at {self.video_path}")
+
+        frame_idx = 0
+        stream_info = {
+            'input_settings': {
+                'start_frame': 0,
+                'original_fps': cap.get(cv2.CAP_PROP_FPS),
+                'camera_info': {'id': 'cam1', 'name': 'Test Camera'}
+            }
+        }
+
+        print(f"\nStarting video processing: {self.video_path}")
+        print(f"Model: {self.model_path}")
+        print(f"Output directory: {self.json_dir}\n")
+
+        while cap.isOpened():
+            ret, frame = cap.read()
+            if not ret:
+                break
+
+            frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+            results = self.model(frame_rgb)
+
+            detections = []
+            for xyxy, conf, cls in zip(results[0].boxes.xyxy, results[0].boxes.conf, results[0].boxes.cls):
+                x1, y1, x2, y2 = xyxy.tolist()
+                detections.append({
+                    'category_id': int(cls),
+                    'confidence': conf.item(),
+                    'bounding_box': {
+                        'xmin': int(x1),
+                        'ymin': int(y1),
+                        'xmax': int(x2),
+                        'ymax': int(y2)
+                    }
+                })
+
+            success, encoded_image = cv2.imencode(".jpg", frame)
+            input_bytes = encoded_image.tobytes() if success else None
+
+            try:
+                result = self.processor.process(
+                    detections, self.config, input_bytes, ProcessingContext(), stream_info
+                )
+            except TypeError:
+                result = self.processor.process(
+                    detections, self.config, ProcessingContext(), stream_info
+                )
+
+            json_path = os.path.join(self.json_dir, f"frame_{frame_idx:04d}.json")
+            with open(json_path, "w") as f:
+                json.dump(self._serialize_result(result), f, indent=2)
+
+            print(f"Frame {frame_idx} processed — detections: {len(detections)} — saved: {json_path}")
+
+            frame_idx += 1
+            stream_info['input_settings']['start_frame'] += 1
+
+            if self.max_frames and frame_idx >= self.max_frames:
+                print(f"\nMax frame limit ({self.max_frames}) reached.")
+                break
+
+        cap.release()
+        print(f"\nProcessing complete. JSON outputs saved in: {self.json_dir}")
+
+
+def main():
+    parser = argparse.ArgumentParser(description="YOLO Use Case Test Processor")
+
+    parser.add_argument("--file_name", type=str, required=True,
+                        help="Usecase file name under src/matrice_analytics/post_processing/usecases/")
+    parser.add_argument("--config_name", type=str, required=True,
+                        help="Config class name (e.g., PeopleCountingConfig)")
+    parser.add_argument("--usecase_name", type=str, required=True,
+                        help="Use case class name (e.g., PeopleCountingUseCase)")
+    parser.add_argument("--model_path", type=str, required=True,
+                        help="Path to YOLO model file (.pt)")
+    parser.add_argument("--video_path", type=str, required=True,
+                        help="Path to input video")
+    parser.add_argument("--post_process", type=json.loads, default=None,
+                        help="JSON string for config overrides, e.g. '{\"min_confidence\": 0.5}'")
+    parser.add_argument("--max_frames", type=int, default=None,
+                        help="Limit number of frames processed")
+
+    args = parser.parse_args()
+
+    processor = UseCaseTestProcessor(
+        file_name=args.file_name,
+        config_name=args.config_name,
+        usecase_name=args.usecase_name,
+        model_path=args.model_path,
+        video_path=args.video_path,
+        post_process=args.post_process,
+        max_frames=args.max_frames
+    )
+    processor.process_video()
+
+
+if __name__ == "__main__":
+    main()
@@ -11,20 +11,21 @@ subprocess.run(
     cmd,
     stdout=log_file,
     stderr=subprocess.STDOUT,
-    preexec_fn=os.setpgrp
+    # preexec_fn=os.setpgrp
 )
 cmd = ["pip", "install", "httpx", "aiohttp", "filterpy"]
 subprocess.run(
     cmd,
     stdout=log_file,
     stderr=subprocess.STDOUT,
-    preexec_fn=os.setpgrp
+    # preexec_fn=os.setpgrp
 )
 log_file.close()
 
 import numpy as np
 from typing import List, Dict, Tuple, Optional
 from dataclasses import dataclass, field
+from pathlib import Path
 import cv2
 import io
 import threading
@@ -371,7 +372,7 @@ class ClipProcessor:
             cmd,
             stdout=log_file,
             stderr=subprocess.STDOUT,
-            preexec_fn=os.setpgrp
+            # preexec_fn=os.setpgrp
         )
 
         # Determine and enforce providers (prefer CUDA only)
@@ -465,4 +465,4 @@ def process_video_with_color_detection(
     classifier = VideoColorClassifier(top_k_colors, min_confidence)
     return classifier.process_video_with_predictions(
         video_bytes, yolo_predictions, output_dir, fps
-    )
+    )
@@ -1144,4 +1144,3 @@ class FireSmokeUseCase(BaseProcessor):
         return (1,1)
 
 
-
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: matrice_analytics
-Version: 0.1.44
+Version: 0.1.46
 Summary: Common server utilities for Matrice.ai services
 Author-email: "Matrice.ai" <dipendra@matrice.ai>
 License-Expression: MIT
@@ -12,7 +12,7 @@ matrice_analytics/boundary_drawing_internal/usage/boundary_drawer_launcher.py,sh
 matrice_analytics/boundary_drawing_internal/usage/simple_boundary_launcher.py,sha256=jHPriRLorLuiC8km0MFNS96w121tKxd7t5GQl7I5kKE,3494
 matrice_analytics/post_processing/README.md,sha256=bDszazvqV5xbGhMM6hDaMctIyk5gox9bADo2IZZ9Goo,13368
 matrice_analytics/post_processing/__init__.py,sha256=dxGBUQaRCGndQmXpYAWqUhDeUZAcxU-_6HFnm3GRDRA,29417
-matrice_analytics/post_processing/config.py,sha256=V0s86qNapyDE6Q81ZS_1uzNqAjz-vc5L-9Tb33XaLEo,6771
+matrice_analytics/post_processing/config.py,sha256=XQLl1bW3X0vqb--b_OAQcvSP0JKPGEcLUdHE8NRnqVs,6770
 matrice_analytics/post_processing/post_processor.py,sha256=F838_vc7p9tjcp-vTMgTbpHqQcLX94xhL9HM06Wvpo8,44384
 matrice_analytics/post_processing/advanced_tracker/README.md,sha256=RM8dynVoUWKn_hTbw9c6jHAbnQj-8hEAXnmuRZr2w1M,22485
 matrice_analytics/post_processing/advanced_tracker/__init__.py,sha256=tAPFzI_Yep5TLX60FDwKqBqppc-EbxSr0wNsQ9DGI1o,423
@@ -29,11 +29,11 @@ matrice_analytics/post_processing/core/config_utils.py,sha256=QuAS-_JKSoNOtfUWgr
 matrice_analytics/post_processing/face_reg/__init__.py,sha256=yntaiGlW9vdjBpPZQXNuovALihJPzRlFyUE88l3MhBA,1364
 matrice_analytics/post_processing/face_reg/compare_similarity.py,sha256=NlFc8b2a74k0PqSFAbuM_fUbA1BT3pr3VUgvSqRpJzQ,23396
 matrice_analytics/post_processing/face_reg/embedding_manager.py,sha256=qbh0df3-YbE0qvFDQvjpCg-JrsCZRJ5capjQ2LPOj1k,35619
-matrice_analytics/post_processing/face_reg/face_recognition.py,sha256=eXzd2hc3qYtYLOxc32l5KAvqS1pIZQTKUhG4wPRMwOc,92552
-matrice_analytics/post_processing/face_reg/face_recognition_client.py,sha256=YaC73mN0yrHZYkjJ7nZIZen5YCVOwEX2gn6I1XpHTW0,27575
-matrice_analytics/post_processing/face_reg/people_activity_logging.py,sha256=NhJXy9jCy_mlZiDXv8IeGyrRN_TL0kKCe3ZOlaNbZUw,13676
+matrice_analytics/post_processing/face_reg/face_recognition.py,sha256=dLopUldzBCsRw1Zu0jewi1xf-JeUbgFx7_3L5pZ8wAg,96510
+matrice_analytics/post_processing/face_reg/face_recognition_client.py,sha256=VaMhjk_tk9zzcB4Vp_FCA_MtgtHtfpzhFJA2caNx9YU,27574
+matrice_analytics/post_processing/face_reg/people_activity_logging.py,sha256=vZbIvkK1h3h58ROeF0_ygF3lqr19O2h5222bN8XyIis,13675
 matrice_analytics/post_processing/ocr/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-matrice_analytics/post_processing/ocr/easyocr_extractor.py,sha256=FwVoUATYdiZtfhSAoiyCo_9dgA786pFZfONx6tsQOfE,11403
+matrice_analytics/post_processing/ocr/easyocr_extractor.py,sha256=RMrRoGb2gMcJEGouQn8U9cCgCLXPT7qRa8liI4LNxFM,11555
 matrice_analytics/post_processing/ocr/postprocessing.py,sha256=RILArp8I9WRH7bALVZ9wPGc-aR7YMdqV1ndOOIcOnGQ,12309
 matrice_analytics/post_processing/ocr/preprocessing.py,sha256=LZolyUw4syMU6Q0V6SQzvkITVSmlkvwu0p9cUNhkbgA,1790
 matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/__init__.py,sha256=OHd4VO0dI8daHojE5900DvreiDT6SGOSZF6VvxU0Tx4,184
@@ -80,6 +80,7 @@ matrice_analytics/post_processing/test_cases/test_customer_service.py,sha256=gfJ
 matrice_analytics/post_processing/test_cases/test_data_generators.py,sha256=RYeY1pCQdPSAsnevdMkqudBOCIDBrWS2-cZK-hcOSlU,20214
 matrice_analytics/post_processing/test_cases/test_people_counting.py,sha256=1QINfIvtQ5idIMHaojgilRcSiCCVqZPd1fyGOM08HiE,19194
 matrice_analytics/post_processing/test_cases/test_processor.py,sha256=Mi0dRkpszChMS1SOPVBHj2bgkRt93Xxl94mvpQ-5yws,19799
+matrice_analytics/post_processing/test_cases/test_usecases.py,sha256=e09c9JaOhtiwO5_TXDV5V_dPsc_jJBG36egu-WM02mE,6331
 matrice_analytics/post_processing/test_cases/test_utilities.py,sha256=zUtBqwELjovkhQfhn1vM-y7aH04z9sFvt6LIpXXBFSE,13415
 matrice_analytics/post_processing/test_cases/test_utils.py,sha256=lgDX0vILylA6m8sG3_3kxAJ7TiDo8xkprJNfBrLoID4,29371
 matrice_analytics/post_processing/usecases/Histopathological_Cancer_Detection_img.py,sha256=bHDXxxG3QgWMFZbDuBaJWpkIvxTXsFMTqCPBCFm3SDs,30247
@@ -115,7 +116,7 @@ matrice_analytics/post_processing/usecases/face_emotion.py,sha256=eRfqBdryB0uNoO
 matrice_analytics/post_processing/usecases/face_recognition.py,sha256=T5xAuv6b9OrkmTmoXgZs4LZ5XUsbvp9xCpeLBwdu7eI,40231
 matrice_analytics/post_processing/usecases/fashion_detection.py,sha256=f9gpzMDhIW-gyn46k9jgf8nY7YeoqAnTxGOzksabFbE,40457
 matrice_analytics/post_processing/usecases/field_mapping.py,sha256=JDwYX8pd2W-waDvBh98Y_o_uchJu7wEYbFxOliA4Iq4,39822
-matrice_analytics/post_processing/usecases/fire_detection.py,sha256=3ZZZrIXtZ_Ui3d3nQxvj12pqFTgguRN4OiE0cQYNXhg,54614
+matrice_analytics/post_processing/usecases/fire_detection.py,sha256=o57jhIrsVBRkpD4nzaUHyuBRA4YZb3yNvQz0Ri_xiVg,54613
 matrice_analytics/post_processing/usecases/flare_analysis.py,sha256=3nf4fUeUwlP_UII0h5fQkUGPXbr32ZnJjaM-dukNSP8,42680
 matrice_analytics/post_processing/usecases/flower_segmentation.py,sha256=4I7qMx9Ztxg_hy9KTVX-3qBhAN-QwDt_Yigf9fFjLus,52017
 matrice_analytics/post_processing/usecases/gas_leak_detection.py,sha256=KL2ft7fXvjTas-65-QgcJm3W8KBsrwF44qibSXjfaLc,40557
@@ -166,9 +167,9 @@ matrice_analytics/post_processing/usecases/weld_defect_detection.py,sha256=b0dAJ
 matrice_analytics/post_processing/usecases/wildlife_monitoring.py,sha256=TMVHJ5GLezmqG7DywmqbLggqNXgpsb63MD7IR6kvDkk,43446
 matrice_analytics/post_processing/usecases/windmill_maintenance.py,sha256=G1eqo3Z-HYmGJ6oeZYrpZwhpvqQ9Lc_T-6S7BLBXHeA,40498
 matrice_analytics/post_processing/usecases/wound_segmentation.py,sha256=ehNX6VuWMB3xAnCySO3ra3Tf_5FUNg5LCSdq_91h374,38342
-matrice_analytics/post_processing/usecases/color/clip.py,sha256=RW3XdsQR2oAMZUTTCCUPBSdAldgG3w4zki_S-hb4tVA,27561
+matrice_analytics/post_processing/usecases/color/clip.py,sha256=EN8z3XIwkkmuhGcSjTWV0Os7tSVwZOL-svsxkzT21e4,27586
 matrice_analytics/post_processing/usecases/color/color_map_utils.py,sha256=SP-AEVcjLmL8rxblu-ixqUJC2fqlcr7ab4hWo4Fcr_k,2677
-matrice_analytics/post_processing/usecases/color/color_mapper.py,sha256=nKPc28mJLlrl2HPua5EUUMweRRSI6WrrUBkeitTm7ms,17459
+matrice_analytics/post_processing/usecases/color/color_mapper.py,sha256=aUbs5bGxxKEg0CFZUIKE1zbABLr02ZB81HRUb_wdPfk,17458
 matrice_analytics/post_processing/usecases/color/clip_processor/merges.txt,sha256=n9aR98gDkhDg_O0VhlRmxlgg0JtjmIsBdL_iXeKZBRo,524619
 matrice_analytics/post_processing/usecases/color/clip_processor/preprocessor_config.json,sha256=j7VHQDZW6QGbCYLSMQsEW-85udKoEaDJh1blLUjiXbY,504
 matrice_analytics/post_processing/usecases/color/clip_processor/special_tokens_map.json,sha256=LNs7gzGmDJL8HlWhPp_WH9IpPFpRJ1_czNYreABSUw4,588
@@ -188,8 +189,8 @@ matrice_analytics/post_processing/utils/format_utils.py,sha256=UTF7A5h9j0_S12xH9
 matrice_analytics/post_processing/utils/geometry_utils.py,sha256=BWfdM6RsdJTTLR1GqkWfdwpjMEjTCJyuBxA4zVGKdfk,9623
 matrice_analytics/post_processing/utils/smoothing_utils.py,sha256=78U-yucAcjUiZ0NIAc9NOUSIT0PWP1cqyIPA_Fdrjp0,14699
 matrice_analytics/post_processing/utils/tracking_utils.py,sha256=rWxuotnJ3VLMHIBOud2KLcu4yZfDp7hVPWUtNAq_2xw,8288
-matrice_analytics-0.1.44.dist-info/licenses/LICENSE.txt,sha256=_uQUZpgO0mRYL5-fPoEvLSbNnLPv6OmbeEDCHXhK6Qc,1066
-matrice_analytics-0.1.44.dist-info/METADATA,sha256=fdA7eR44gue1uKeUrOE0fDpgx2bIsqXH2-h4uOuNiDY,14378
-matrice_analytics-0.1.44.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-matrice_analytics-0.1.44.dist-info/top_level.txt,sha256=STAPEU-e-rWTerXaspdi76T_eVRSrEfFpURSP7_Dt8E,18
-matrice_analytics-0.1.44.dist-info/RECORD,,
+matrice_analytics-0.1.46.dist-info/licenses/LICENSE.txt,sha256=_uQUZpgO0mRYL5-fPoEvLSbNnLPv6OmbeEDCHXhK6Qc,1066
+matrice_analytics-0.1.46.dist-info/METADATA,sha256=xzaR5W7et1MNAyYu-bK7bqBi_pLCs5GOfiLkBo_Olk4,14378
+matrice_analytics-0.1.46.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+matrice_analytics-0.1.46.dist-info/top_level.txt,sha256=STAPEU-e-rWTerXaspdi76T_eVRSrEfFpURSP7_Dt8E,18
+matrice_analytics-0.1.46.dist-info/RECORD,,