matrice-analytics 0.1.3__py3-none-any.whl → 0.1.31__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of matrice-analytics might be problematic.
- matrice_analytics/post_processing/advanced_tracker/matching.py +3 -3
- matrice_analytics/post_processing/advanced_tracker/strack.py +1 -1
- matrice_analytics/post_processing/face_reg/compare_similarity.py +5 -5
- matrice_analytics/post_processing/face_reg/embedding_manager.py +14 -7
- matrice_analytics/post_processing/face_reg/face_recognition.py +123 -34
- matrice_analytics/post_processing/face_reg/face_recognition_client.py +332 -82
- matrice_analytics/post_processing/face_reg/people_activity_logging.py +29 -22
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/__init__.py +9 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/__init__.py +4 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/cli.py +33 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/dataset_stats.py +139 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/export.py +398 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/train.py +447 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/utils.py +129 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/valid.py +93 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/validate_dataset.py +240 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/visualize_augmentation.py +176 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/visualize_predictions.py +96 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/__init__.py +3 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/process.py +246 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/types.py +60 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/utils.py +87 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/__init__.py +3 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/config.py +82 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/hub.py +141 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/plate_recognizer.py +323 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/py.typed +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/augmentation.py +101 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/dataset.py +97 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/config.py +114 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/layers.py +553 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/loss.py +55 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/metric.py +86 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/model_builders.py +95 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/model_schema.py +395 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/backend_utils.py +38 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/utils.py +214 -0
- matrice_analytics/post_processing/ocr/postprocessing.py +0 -1
- matrice_analytics/post_processing/post_processor.py +19 -5
- matrice_analytics/post_processing/usecases/color/clip.py +42 -8
- matrice_analytics/post_processing/usecases/color/color_mapper.py +2 -2
- matrice_analytics/post_processing/usecases/color_detection.py +21 -98
- matrice_analytics/post_processing/usecases/drone_traffic_monitoring.py +41 -386
- matrice_analytics/post_processing/usecases/flare_analysis.py +1 -56
- matrice_analytics/post_processing/usecases/license_plate_detection.py +476 -202
- matrice_analytics/post_processing/usecases/license_plate_monitoring.py +252 -11
- matrice_analytics/post_processing/usecases/people_counting.py +408 -1431
- matrice_analytics/post_processing/usecases/people_counting_bckp.py +1683 -0
- matrice_analytics/post_processing/usecases/vehicle_monitoring.py +39 -10
- matrice_analytics/post_processing/utils/__init__.py +8 -8
- {matrice_analytics-0.1.3.dist-info → matrice_analytics-0.1.31.dist-info}/METADATA +1 -1
- {matrice_analytics-0.1.3.dist-info → matrice_analytics-0.1.31.dist-info}/RECORD +59 -24
- {matrice_analytics-0.1.3.dist-info → matrice_analytics-0.1.31.dist-info}/WHEEL +0 -0
- {matrice_analytics-0.1.3.dist-info → matrice_analytics-0.1.31.dist-info}/licenses/LICENSE.txt +0 -0
- {matrice_analytics-0.1.3.dist-info → matrice_analytics-0.1.31.dist-info}/top_level.txt +0 -0
matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/utils.py
@@ -0,0 +1,214 @@
+"""
+Utility functions module
+"""
+
+import logging
+import pathlib
+import pkgutil
+import random
+from collections.abc import Iterator
+from importlib import import_module
+from typing import Optional, Union
+
+import cv2
+import keras
+import numpy as np
+import numpy.typing as npt
+
+from fast_plate_ocr.core.process import read_and_resize_plate_image
+from fast_plate_ocr.core.types import ImageColorMode, ImageInterpolation, PaddingColor
+from fast_plate_ocr.train.model.config import PlateOCRConfig
+from fast_plate_ocr.train.model.loss import cce_loss, focal_cce_loss
+from fast_plate_ocr.train.model.metric import (
+    cat_acc_metric,
+    plate_acc_metric,
+    plate_len_acc_metric,
+    top_3_k_metric,
+)
+
+
+def one_hot_plate(plate: str, alphabet: str) -> list[list[int]]:
+    return [[0 if char != letter else 1 for char in alphabet] for letter in plate]
+
+
+def target_transform(
+    plate_text: str,
+    max_plate_slots: int,
+    alphabet: str,
+    pad_char: str,
+) -> npt.NDArray[np.uint8]:
+    # Pad the plates which length is smaller than 'max_plate_slots'
+    plate_text = plate_text.ljust(max_plate_slots, pad_char)
+    # Generate numpy arrays with one-hot encoding of plates
+    encoded_plate = np.array(one_hot_plate(plate_text, alphabet=alphabet), dtype=np.uint8)
+    return encoded_plate
+
+
+def _register_custom_keras():
+    base_pkg = "fast_plate_ocr.train.model"
+    for _, name, _ in pkgutil.walk_packages(
+        import_module(base_pkg).__path__, prefix=f"{base_pkg}."
+    ):
+        if any(m in name for m in ("layers",)):
+            import_module(name)
+
+
+def load_keras_model(
+    model_path: Union[str, pathlib.Path],
+    plate_config: PlateOCRConfig,
+) -> keras.Model:
+    """
+    Utility helper function to load the keras OCR model.
+    """
+    _register_custom_keras()
+    custom_objects = {
+        "cce": cce_loss(
+            vocabulary_size=plate_config.vocabulary_size,
+        ),
+        "focal_cce": focal_cce_loss(
+            vocabulary_size=plate_config.vocabulary_size,
+        ),
+        "cat_acc": cat_acc_metric(
+            max_plate_slots=plate_config.max_plate_slots,
+            vocabulary_size=plate_config.vocabulary_size,
+        ),
+        "plate_acc": plate_acc_metric(
+            max_plate_slots=plate_config.max_plate_slots,
+            vocabulary_size=plate_config.vocabulary_size,
+        ),
+        "top_3_k": top_3_k_metric(
+            vocabulary_size=plate_config.vocabulary_size,
+        ),
+        "plate_len_acc": plate_len_acc_metric(
+            max_plate_slots=plate_config.max_plate_slots,
+            vocabulary_size=plate_config.vocabulary_size,
+            pad_token_index=plate_config.pad_idx,
+        ),
+    }
+    model = keras.models.load_model(model_path, custom_objects=custom_objects)
+    return model
+
+
+IMG_EXTENSIONS: set[str] = {".jpg", ".jpeg", ".png", ".bmp", ".gif", ".tiff", ".webp"}
+"""Valid image extensions for the scope of this script."""
+
+
+def load_images_from_folder(  # noqa: PLR0913
+    img_dir: pathlib.Path,
+    width: int,
+    height: int,
+    image_color_mode: ImageColorMode = "grayscale",
+    keep_aspect_ratio: bool = False,
+    interpolation_method: ImageInterpolation = "linear",
+    padding_color: PaddingColor = (114, 114, 114),
+    shuffle: bool = False,
+    limit: Optional[int] = None,
+) -> Iterator[npt.NDArray]:
+    """
+    Return all images read from a directory. This uses the same read function used during training.
+    """
+    # pylint: disable=too-many-arguments
+    image_paths = sorted(
+        str(f.resolve()) for f in img_dir.iterdir() if f.is_file() and f.suffix in IMG_EXTENSIONS
+    )
+    if limit:
+        image_paths = image_paths[:limit]
+    if shuffle:
+        random.shuffle(image_paths)
+    yield from (
+        read_and_resize_plate_image(
+            i,
+            img_height=height,
+            img_width=width,
+            image_color_mode=image_color_mode,
+            keep_aspect_ratio=keep_aspect_ratio,
+            interpolation_method=interpolation_method,
+            padding_color=padding_color,
+        )
+        for i in image_paths
+    )
+
+
+def postprocess_model_output(
+    prediction: npt.NDArray,
+    alphabet: str,
+    max_plate_slots: int,
+    vocab_size: int,
+) -> tuple[str, npt.NDArray]:
+    """
+    Return plate text and confidence scores from raw model output.
+    """
+    prediction = prediction.reshape((max_plate_slots, vocab_size))
+    probs = np.max(prediction, axis=-1)
+    prediction = np.argmax(prediction, axis=-1)
+    plate = "".join([alphabet[x] for x in prediction])
+    return plate, probs
+
+
+def low_confidence_positions(probs, thresh=0.3) -> npt.NDArray:
+    """Returns indices of elements in `probs` less than `thresh`, indicating low confidence."""
+    return np.where(np.array(probs) < thresh)[0]
+
+
+def display_predictions(
+    image: npt.NDArray,
+    plate: str,
+    probs: npt.NDArray,
+    low_conf_thresh: float,
+) -> None:
+    """
+    Display plate and corresponding prediction.
+    """
+    plate_str = "".join(plate)
+    logging.info("Plate: %s", plate_str)
+    logging.info("Confidence: %s", probs)
+    image_to_show = cv2.resize(image, None, fx=3, fy=3, interpolation=cv2.INTER_LINEAR)
+    if len(image_to_show.shape) == 2:
+        image_to_show = cv2.cvtColor(image_to_show, cv2.COLOR_GRAY2RGB)
+    elif image_to_show.shape[2] == 3:
+        image_to_show = cv2.cvtColor(image_to_show, cv2.COLOR_BGR2RGB)
+    # Average probabilities
+    avg_prob = np.mean(probs) * 100
+    cv2.putText(
+        image_to_show,
+        f"{plate_str} {avg_prob:.{2}f}%",
+        org=(5, 30),
+        fontFace=cv2.FONT_HERSHEY_SIMPLEX,
+        fontScale=1,
+        color=(0, 0, 0),
+        lineType=1,
+        thickness=6,
+    )
+    cv2.putText(
+        image_to_show,
+        f"{plate_str} {avg_prob:.{2}f}%",
+        org=(5, 30),
+        fontFace=cv2.FONT_HERSHEY_SIMPLEX,
+        fontScale=1,
+        color=(255, 255, 255),
+        lineType=1,
+        thickness=2,
+    )
+    # Display character with low confidence
+    low_conf_chars = "Low conf. on: " + " ".join(
+        [plate[i] for i in low_confidence_positions(probs, thresh=low_conf_thresh)]
+    )
+    cv2.putText(
+        image_to_show,
+        low_conf_chars,
+        org=(5, 200),
+        fontFace=cv2.FONT_HERSHEY_SIMPLEX,
+        fontScale=0.7,
+        color=(0, 0, 220),
+        lineType=1,
+        thickness=2,
+    )
+    try:
+        cv2.imshow("plates", image_to_show)
+        if cv2.waitKey(0) & 0xFF == ord("q"):
+            return
+    except cv2.error as e:  # pylint: disable=catching-non-exception
+        raise RuntimeError(  # pylint: disable=bad-exception-cause
+            "This visualization requires full OpenCV with GUI support. "
+            "Install with `pip install opencv-python` instead of headless."
+        ) from e
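For orientation, a minimal sketch of how the decoding helpers in this new module compose. The alphabet, slot count, and import path are illustrative assumptions (the vendored copy still imports under the upstream fast_plate_ocr package name), not values taken from this release:

import numpy as np

from fast_plate_ocr.train.utilities.utils import (
    low_confidence_positions,
    postprocess_model_output,
)

# Hypothetical plate config: 9 slots, digits + uppercase letters + "_" as pad.
ALPHABET = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_"
MAX_SLOTS = 9

# Stand-in for a raw model output: one flat (slots * vocab) probability vector.
raw = np.random.rand(MAX_SLOTS * len(ALPHABET)).astype(np.float32)

plate, probs = postprocess_model_output(
    raw,
    alphabet=ALPHABET,
    max_plate_slots=MAX_SLOTS,
    vocab_size=len(ALPHABET),
)
print(plate)                                        # per-slot argmax decoded text
print(low_confidence_positions(probs, thresh=0.3))  # slot indices below threshold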
matrice_analytics/post_processing/ocr/postprocessing.py
@@ -9,7 +9,6 @@ class TextPostprocessor:
         Args:
             logging_level: The level of logging detail. Default is INFO.
         """
-        logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging_level)
         self.logger = logging.getLogger('TextPostprocessor')

         self.task_processors = {
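Dropping the module-level logging.basicConfig call is the usual fix for a library clobbering the host application's logging setup. A minimal sketch of the application-side equivalent, assuming nothing about matrice's own entry points:

import logging

# The application, not the imported library, owns global logging configuration.
logging.basicConfig(
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
    level=logging.INFO,
)

# The library's named logger then inherits this configuration.
logging.getLogger("TextPostprocessor").info("configured by the application")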
matrice_analytics/post_processing/post_processor.py
@@ -312,6 +312,7 @@ class PostProcessor:
     ) -> None:
         """Remove parameters that aren't needed for specific use cases."""
         facial_recognition_usecases = {"face_recognition"}
+        license_plate_monitoring_usecases = {"license_plate_monitor"}

         if usecase not in facial_recognition_usecases:
             if "facial_recognition_server_id" in config_params:
@@ -319,7 +320,14 @@ class PostProcessor:
                     f"Removing facial_recognition_server_id from {usecase} config"
                 )
                 config_params.pop("facial_recognition_server_id", None)
-
+
+        if usecase not in license_plate_monitoring_usecases:
+            if "lpr_server_id" in config_params:
+                logging.debug(f"Removing lpr_server_id from {usecase} config")
+                config_params.pop("lpr_server_id", None)
+
+        # Keep session and lpr_server_id only for use cases that need them
+        if usecase not in facial_recognition_usecases and usecase not in license_plate_monitoring_usecases:
             if "session" in config_params:
                 logging.debug(f"Removing session from {usecase} config")
                 config_params.pop("session", None)
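Condensed, the new rules keep lpr_server_id only for the license-plate-monitoring use case and session only for the face-recognition and license-plate-monitoring use cases. A standalone walk-through with hypothetical parameter values:

config_params = {"lpr_server_id": "srv-1", "session": "sess-1", "threshold": 0.5}
usecase = "people_counting"  # neither face recognition nor license plate monitoring

if usecase not in {"license_plate_monitor"}:
    config_params.pop("lpr_server_id", None)
if usecase not in {"face_recognition"} and usecase not in {"license_plate_monitor"}:
    config_params.pop("session", None)

print(config_params)  # {'threshold': 0.5}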
@@ -660,8 +668,13 @@ class PostProcessor:
         if not use_case_class:
             raise ValueError(f"Use case '{config.category}/{config.usecase}' not found")

-
-
+
+        if isinstance(use_case_class, FaceRecognitionEmbeddingUseCase):
+            use_case = use_case_class(config=config)
+        else:
+            use_case = use_case_class()
+        logger.info(f"Created use case instance for: {config.category}/{config.usecase}")
+

         # Cache the instance
         self._use_case_cache[cache_key] = use_case
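Note that use_case_class here is a class object, so the isinstance check only matches if the registry stores instances rather than classes; for a registry of classes, the conventional form of this dispatch is issubclass. A sketch of that pattern, with hypothetical names:

class BaseUseCase:
    pass

class FaceRecognitionEmbeddingUseCase(BaseUseCase):
    def __init__(self, config=None):
        self.config = config

def build_use_case(use_case_class, config):
    # issubclass matches a class object; isinstance would match an instance.
    if issubclass(use_case_class, FaceRecognitionEmbeddingUseCase):
        return use_case_class(config=config)
    return use_case_class()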
@@ -689,7 +702,8 @@ class PostProcessor:
         FlareAnalysisUseCase,
         LicensePlateMonitorUseCase,
         AgeGenderUseCase,
-        PeopleTrackingUseCase
+        PeopleTrackingUseCase,
+        FaceRecognitionEmbeddingUseCase
     }

     # Async use cases
@@ -1017,7 +1031,7 @@ class PostProcessor:
             "total_processing_time": 0.0,
         }

-    def _parse_config(
+    def _parse_config(  # TODO: remove all of the kwargs that are not in the use case config
         self, config: Union[BaseConfig, Dict[str, Any], str, Path]
     ) -> BaseConfig:
         """Parse configuration from various input formats."""
matrice_analytics/post_processing/usecases/color/clip.py
@@ -1,23 +1,45 @@
+import requests
+import sys
+from pathlib import Path
+import logging
+import subprocess
+import shutil
+import os
+log_file = open("pip_jetson_bti.log", "w")
+cmd = ["pip", "install", "importlib-resources"]
+subprocess.run(
+    cmd,
+    stdout=log_file,
+    stderr=subprocess.STDOUT,
+    preexec_fn=os.setpgrp
+)
+cmd = ["pip", "install", "httpx", "aiohttp", "filterpy"]
+subprocess.run(
+    cmd,
+    stdout=log_file,
+    stderr=subprocess.STDOUT,
+    preexec_fn=os.setpgrp
+)
+log_file.close()
+
 import numpy as np
 from typing import List, Dict, Tuple, Optional
 from dataclasses import dataclass, field
-from pathlib import Path
 import cv2
-import os
 import io
 import threading
-
-import
+import onnxruntime as ort
+from PIL import Image
+
+
 try:
     from transformers import CLIPProcessor
-
-    from PIL import Image
+    print("transformers imported successfully")
     from importlib.resources import files as ir_files, as_file as ir_as_file
-
 except:
     ir_files = None
     ir_as_file = None
-    print("Unable to import
+    print("Unable to import transformers/irlib-resources @ clip.py")

 def load_model_from_checkpoint(checkpoint_url: str, providers: Optional[List] = None):
     """
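These new module-level subprocess.run calls shell out to pip on every import of clip.py, blocking and with network access, which is plausibly what trips the "potentially problematic" flag above. A sketch of the conventional alternative, failing fast instead of installing as an import side effect:

# Conventional guard: report the missing dependency rather than
# installing packages as a side effect of `import`.
try:
    import onnxruntime as ort
except ImportError as exc:
    raise ImportError(
        "clip.py requires onnxruntime; install it with `pip install onnxruntime`"
    ) from exc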
@@ -97,11 +119,22 @@ class ClipProcessor:
         cwd = os.getcwd()
         print("Current working directory:", cwd)

+        log_file = open("pip_jetson_bti.log", "w")
+        cmd = ["pip", "install", "--force-reinstall", "huggingface_hub", "regex", "safetensors"]
+        subprocess.Popen(
+            cmd,
+            stdout=log_file,
+            stderr=subprocess.STDOUT,
+            preexec_fn=os.setpgrp
+        )
+
         # Determine and enforce providers (prefer CUDA only)
         try:
             available = ort.get_available_providers()
         except Exception:
+            print("You are seein this error because of ort :(")
             available = []
+        print("True OG Available ONNX providers:", available, 'providers(if any):',providers)

         if providers is None:
             if "CUDAExecutionProvider" in available:
@@ -114,6 +147,7 @@ class ClipProcessor:

         # Thread-safety to serialize processing
         self._lock = threading.Lock()
+        print("Curr Providersss: ",self.providers)

         self.image_sess = load_model_from_checkpoint(self.image_url, providers=self.providers)
         self.text_sess = load_model_from_checkpoint(self.text_url, providers=self.providers)
matrice_analytics/post_processing/usecases/color/color_mapper.py
@@ -9,7 +9,7 @@ from datetime import datetime, timedelta
 import logging

 # Import your existing color extraction functions
-from color_map_utils import extract_major_colors
+#from color_map_utils import extract_major_colors

 # Configure logging
 logger = logging.getLogger(__name__)
@@ -163,7 +163,7 @@ class VideoColorClassifier:
                 continue

             # Extract colors
-            major_colors = extract_major_colors(cropped_obj, k=self.top_k_colors)
+            major_colors = [()] #extract_major_colors(cropped_obj, k=self.top_k_colors)
             main_color = major_colors[0][0] if major_colors else "unknown"

             # Create detailed result entry
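One thing to note about this stub: [()] is a truthy list, so the guarded expression on the next line still indexes the empty inner tuple. A standalone repro of the difference:

major_colors = [()]  # truthy, but the inner tuple is empty
try:
    main_color = major_colors[0][0] if major_colors else "unknown"
except IndexError:
    main_color = "error"          # this branch is what actually happens

major_colors = []                 # falsy: the guard works as intended
main_color = major_colors[0][0] if major_colors else "unknown"  # "unknown"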
matrice_analytics/post_processing/usecases/color_detection.py
@@ -15,7 +15,6 @@ from ..utils import (
     filter_by_categories,
     apply_category_mapping,
     match_results_structure,
-    extract_major_colors,
     count_objects_by_category,
     calculate_counting_summary,
     match_results_structure,
@@ -26,7 +25,11 @@ from ..utils import (
 )
 from ..utils.geometry_utils import get_bbox_center, point_in_polygon, get_bbox_bottom25_center
 from ..usecases.color.clip import ClipProcessor
-
+import sys
+from pathlib import Path
+import logging
+import subprocess
+import shutil

 @dataclass
 class ColorDetectionConfig(BaseConfig):
@@ -86,13 +89,13 @@ class ColorDetectionConfig(BaseConfig):
     detector = True

     #JBK_720_GATE POLYGON = [[86, 328], [844, 317], [1277, 520], [1273, 707], [125, 713]]
-    zone_config: Optional[Dict[str, List[List[float]]]] = field(
-
-
-
-
-    )
+    zone_config: Optional[Dict[str, List[List[float]]]] = None #field(
+    #     default_factory=lambda: {
+    #         "zones": {
+    #             "Interest_Region": [[86, 328], [844, 317], [1277, 520], [1273, 707], [125, 713]],
+    #         }
+    #     }
+    # )
     # true_import: bool = False

     def validate(self) -> List[str]:
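With the default now None, zones must be supplied by the caller. A hypothetical configuration following the shape preserved in the commented-out default (any other required BaseConfig fields are omitted here):

config = ColorDetectionConfig(
    zone_config={
        "zones": {
            "Interest_Region": [[86, 328], [844, 317], [1277, 520], [1273, 707], [125, 713]],
        }
    },
)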
@@ -116,6 +119,15 @@ class ColorDetectionConfig(BaseConfig):
     def __post_init__(self):
         # Lazy initialization: the ClipProcessor will be created once by the use case
         # to avoid repeated model downloads and to ensure GPU session reuse.
+        # log_file = open("pip_jetson_bt.log", "w")
+        # cmd = ["pip", "install", "--force-reinstall", "huggingface_hub", "regex", "safetensors"]
+        # subprocess.Popen(
+        #     cmd,
+        #     stdout=log_file,
+        #     stderr=subprocess.STDOUT,
+        #     preexec_fn=os.setpgrp
+        # )
+        print("Came to post_init and libraries installed!!!")
         if self.detector:
             self.detector = ClipProcessor()
             print("ClipProcessor Loaded Successfully!!")
@@ -669,95 +681,6 @@ class ColorDetectionUseCase(BaseProcessor):
             return True
         return False

-    def _analyze_colors_in_video(
-        self,
-        data: Any,
-        video_bytes: bytes,
-        config: ColorDetectionConfig
-    ) -> List[Dict[str, Any]]:
-        with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as temp_video:
-            temp_video.write(video_bytes)
-            video_path = temp_video.name
-
-        try:
-            cap = cv2.VideoCapture(video_path)
-            if not cap.isOpened():
-                raise RuntimeError("Failed to open video file")
-
-            fps = config.fps or cap.get(cv2.CAP_PROP_FPS)
-            width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
-            height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
-
-            color_analysis = []
-            frame_id = 0
-
-            while True:
-                ret, frame = cap.read()
-                if not ret:
-                    break
-
-                if frame_id % config.frame_skip != 0:
-                    frame_id += 1
-                    continue
-
-                frame_key = str(frame_id)
-                timestamp = frame_id / fps
-                frame_detections = self._get_frame_detections(data, frame_key)
-                if not frame_detections:
-                    frame_id += 1
-                    continue
-
-                rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-
-                for detection in frame_detections:
-                    if detection.get("confidence", 1.0) < config.confidence_threshold:
-                        continue
-
-                    bbox = detection.get("bounding_box", detection.get("bbox"))
-                    if not bbox:
-                        continue
-
-                    # Check all zones
-                    zones = config.zone_config['zones'] if config.zone_config else {}
-                    in_any_zone = not zones  # Process all if no zones
-                    zone_name = None
-                    for z_name, zone_polygon in zones.items():
-                        if self._is_in_zone(bbox, zone_polygon):
-                            in_any_zone = True
-                            zone_name = z_name
-                            break
-                    if not in_any_zone:
-                        continue  # Skip detections outside zones
-
-                    crop = self._crop_bbox(rgb_frame, bbox, config.bbox_format)
-                    if crop.size == 0:
-                        continue
-
-                    major_colors = extract_major_colors(crop, k=config.top_k_colors)
-                    main_color = major_colors[0][0] if major_colors else "unknown"
-
-                    color_record = {
-                        "frame_id": frame_key,
-                        "timestamp": round(timestamp, 2),
-                        "category": detection.get("category", "unknown"),
-                        "confidence": round(detection.get("confidence", 0.0), 3),
-                        "main_color": main_color,
-                        "major_colors": major_colors,
-                        "bbox": bbox,
-                        "detection_id": detection.get("id", f"det_{len(color_analysis)}"),
-                        "track_id": detection.get("track_id"),
-                        "zone_name": zone_name
-                    }
-                    color_analysis.append(color_record)
-
-                frame_id += 1
-
-            cap.release()
-            return color_analysis
-
-        finally:
-            if os.path.exists(video_path):
-                os.unlink(video_path)

     def _analyze_colors_in_image(
         self,