matrice-inference 0.1.2__py3-none-any.whl → 0.1.22__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release: this version of matrice-inference might be problematic.

@@ -2,6 +2,7 @@
 
 import os
 import sys
+import platform
 from matrice_common.utils import dependencies_check
 
 base = [
@@ -24,36 +25,52 @@ base = [
     "transformers"
 ]
 
-# Install base dependencies first
-dependencies_check(base)
-
 # Helper to attempt installation and verify importability
 def _install_and_verify(pkg: str, import_name: str):
     """Install a package expression and return True if the import succeeds."""
-    if dependencies_check([pkg]):
-        try:
-            __import__(import_name)
-            return True
-        except ImportError:
-            return False
-    return False
+    try:
+        if pkg=='onnxruntime-gpu':
+            pkg = 'onnxruntime'
+        __import__(pkg)
+        return True
+    except:
+        if dependencies_check([pkg]):
+            try:
+                __import__(import_name)
+                return True
+            except ImportError:
+                return False
+        return False
+
+# Runtime gating for optional OCR bootstrap (default OFF), and never on Jetson
+_ENABLE_OCR_BOOTSTRAP = os.getenv("MATRICE_ENABLE_OCR_BOOTSTRAP", "0")
+_IS_JETSON = (platform.machine().lower() in ("aarch64", "arm64"))
 
-if not dependencies_check(["opencv-python"]):
-    dependencies_check(["opencv-python-headless"])
+print("*******************************Deployment ENV Info**********************************")
+print(f"ENABLE_JETSON_PIP_SETTINGS: {_ENABLE_OCR_BOOTSTRAP}")  #0 if OFF, 1 if ON, this will be set to 1 in jetson byom codebase.
+print(f"IS_JETSON_ARCH?: {_IS_JETSON}")  #True if Jetson, False otherwise
+print("*************************************************************************************")
 
-# Attempt GPU-specific dependencies first
-_gpu_ok = _install_and_verify("onnxruntime-gpu", "onnxruntime") and _install_and_verify(
-    "fast-plate-ocr[onnx-gpu]", "fast_plate_ocr"
-)
+if not int(_ENABLE_OCR_BOOTSTRAP) and not _IS_JETSON:
+    # Install base dependencies first
+    dependencies_check(base)
 
-if not _gpu_ok:
-    # Fallback to CPU variants
-    _cpu_ok = _install_and_verify("onnxruntime", "onnxruntime") and _install_and_verify(
-        "fast-plate-ocr[onnx]", "fast_plate_ocr"
+    if not dependencies_check(["opencv-python"]):
+        dependencies_check(["opencv-python-headless"])
+
+    # Attempt GPU-specific dependencies first
+    _gpu_ok = _install_and_verify("onnxruntime-gpu", "onnxruntime") and _install_and_verify(
+        "fast-plate-ocr[onnx-gpu]", "fast_plate_ocr"
     )
-    if not _cpu_ok:
-        # Last-chance fallback without extras tag (PyPI sometimes lacks them)
-        _install_and_verify("fast-plate-ocr", "fast_plate_ocr")
+
+    if not _gpu_ok:
+        # Fallback to CPU variants
+        _cpu_ok = _install_and_verify("onnxruntime", "onnxruntime") and _install_and_verify(
+            "fast-plate-ocr[onnx]", "fast_plate_ocr"
+        )
+        if not _cpu_ok:
+            # Last-chance fallback without extras tag (PyPI sometimes lacks them)
+            _install_and_verify("fast-plate-ocr", "fast_plate_ocr")
 
 # matrice_deps = ["matrice_common", "matrice_analytics", "matrice"]
 
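Two behavioral notes on this hunk. First, `_install_and_verify` now tries a plain `__import__(pkg)` before touching pip, so packages that are already present are never reinstalled; the special case maps the `onnxruntime-gpu` distribution name to its importable module `onnxruntime`. The bare `except:` also catches non-import failures, so any error in the fast path falls through to `dependencies_check`. Second, note the gate's polarity: with the default `MATRICE_ENABLE_OCR_BOOTSTRAP="0"`, `not int(...)` is true, so the bootstrap still runs by default and is skipped only when the flag is set to `"1"` or on aarch64/arm64 (Jetson) hosts. A minimal sketch of the same import-first pattern; `ensure_importable` and `DIST_TO_MODULE` are illustrative names, not matrice-inference API:

    import importlib
    import subprocess
    import sys

    # Hypothetical helper mirroring the import-first check above: pip
    # distribution names differ from import names, hence the mapping.
    DIST_TO_MODULE = {"onnxruntime-gpu": "onnxruntime", "opencv-python": "cv2"}

    def ensure_importable(dist: str) -> bool:
        """Import the module if possible; pip-install the distribution only on failure."""
        module = DIST_TO_MODULE.get(dist, dist.replace("-", "_"))
        try:
            importlib.import_module(module)  # fast path: no pip involved
            return True
        except ImportError:
            pass
        if subprocess.run([sys.executable, "-m", "pip", "install", dist]).returncode != 0:
            return False
        try:
            importlib.import_module(module)  # verify the install made it importable
            return True
        except ImportError:
            return False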
@@ -1,23 +1,29 @@
 import os
 import logging
 
-# Root logger
-logging.basicConfig(level=logging.DEBUG)
+# Define paths
+log_path = os.path.join(os.getcwd(), "deploy_server.log")
 
-# Console handler (INFO+)
+# Create handlers explicitly
 console_handler = logging.StreamHandler()
-console_handler.setLevel(logging.INFO)
-
-# File handler (DEBUG+)
-log_path = os.path.join(os.getcwd(), "deploy_server.log")
 file_handler = logging.FileHandler(log_path)
+
+# Set levels
+console_handler.setLevel(logging.INFO)
 file_handler.setLevel(logging.DEBUG)
 
-# Formatter
+# Define a formatter
 formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
 console_handler.setFormatter(formatter)
 file_handler.setFormatter(formatter)
 
-# Add handlers to root logger
-logging.getLogger().addHandler(console_handler)
-logging.getLogger().addHandler(file_handler)
+# Get the root logger
+logger = logging.getLogger()
+logger.setLevel(logging.DEBUG)  # Root level must be the lowest (DEBUG)
+
+# Optional: remove any default handlers if basicConfig was called earlier
+if logger.hasHandlers():
+    logger.handlers.clear()
+
+logger.addHandler(console_handler)
+logger.addHandler(file_handler)
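The rewritten bootstrap fixes two problems with the old version: it sets the root logger to DEBUG explicitly (handler levels only filter records the logger already lets through; the default root level of WARNING would drop DEBUG and INFO before any handler saw them), and it clears any handlers a prior `basicConfig` call installed, so records are not emitted twice. A quick sanity check of the resulting split, assuming the configuration above has run:

    import logging

    logging.debug("debug detail")   # file only: below the console handler's INFO level
    logging.info("startup info")    # console and deploy_server.log
    logging.warning("heads up")     # console and deploy_server.log

Each record is rendered by both handlers with the shared "%(asctime)s - %(levelname)s - %(message)s" format.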
@@ -17,8 +17,6 @@ from matrice_common.utils import dependencies_check
 TRITON_DOCKER_IMAGE = "nvcr.io/nvidia/tritonserver:23.08-py3"
 BASE_PATH = "./model_repository"
 
-logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s] %(message)s")
-
 class TritonServer:
     def __init__(
         self,
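Dropping the module-level `logging.basicConfig(...)` call is the right fix here: `basicConfig` installs a root handler as an import side effect, which would duplicate output once the explicit console and file handlers above are attached. The usual library idiom, shown as a sketch rather than what this module necessarily does now, is a named module logger that inherits whatever the application configured:

    import logging

    # Library code logs through a named logger and installs no handlers;
    # the application's root-logger configuration decides where records go.
    logger = logging.getLogger(__name__)
    logger.info("Triton server starting")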
@@ -1161,7 +1159,7 @@ class TritonInference:
         model_alphabet: str = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_",
         return_confidence: bool = True,
         confidence_threshold: float = 0.0,  # Disabled threshold to match ONNX
-    ) -> Tuple[list[str], np.ndarray] | list[str]:
+    ) -> Union[Tuple[List[str], np.ndarray], List[str]]:
         """Postprocess OCR model outputs into license plate strings.
 
         Args:
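This is a compatibility fix, not a cosmetic one: the `X | Y` union syntax on typing objects requires Python 3.10 (PEP 604), so evaluating the old annotation at function-definition time raises `TypeError` on older interpreters. The replacement spells the same type entirely with `typing` constructs, provided `Union`, `Tuple`, and `List` are imported. A self-contained illustration; `OcrResult` and `postprocess` are example names only:

    from typing import List, Tuple, Union

    import numpy as np

    # Equivalent to `Tuple[List[str], np.ndarray] | List[str]`, but evaluates
    # on pre-3.10 interpreters where `|` on typing objects raises TypeError.
    OcrResult = Union[Tuple[List[str], np.ndarray], List[str]]

    def postprocess(return_confidence: bool = True) -> OcrResult:
        plates = ["ABC123"]
        return (plates, np.array([0.98])) if return_confidence else plates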
@@ -320,7 +320,10 @@ class MatriceDeployServer:
             post_processing_config = {}
         if isinstance(post_processing_config, dict):
             post_processing_config["facial_recognition_server_id"] = self.job_params.get("facial_recognition_server_id", None)
+            post_processing_config["lpr_server_id"] = self.job_params.get("lpr_server_id", None)
             post_processing_config["session"] = self.session  # Pass the session to post-processing
+            # Pass deployment_id for facial recognition deployment update
+            post_processing_config["deployment_id"] = self.deployment_id
 
         # Get index_to_category from action_tracker if available
         index_to_category = None
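The deploy server now forwards `lpr_server_id` and its own `deployment_id` into `post_processing_config`, alongside the existing `facial_recognition_server_id` and `session`, so a downstream post-processor can reach the license-plate-recognition service over the same session. A sketch of how a consumer might read the enriched dict; `LprClient` is an illustrative stand-in, not a matrice-inference class:

    from dataclasses import dataclass
    from typing import Any, Optional

    @dataclass
    class LprClient:  # hypothetical consumer of the forwarded IDs
        session: Any
        server_id: str
        deployment_id: Optional[str]

    def build_lpr_client(post_processing_config: dict) -> Optional[LprClient]:
        """Return a client only when an LPR server was configured for this job."""
        server_id = post_processing_config.get("lpr_server_id")
        if server_id is None:
            return None
        return LprClient(
            session=post_processing_config.get("session"),  # reuse the server's session
            server_id=server_id,
            deployment_id=post_processing_config.get("deployment_id"),
        )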
@@ -374,10 +377,6 @@ class MatriceDeployServer:
         self.streaming_pipeline = StreamingPipeline(
             inference_interface=self.inference_interface,
             post_processor=self.post_processor,
-            consumer_threads=self.job_params.get("consumer_threads", 4),
-            producer_threads=self.job_params.get("producer_threads", 2),
-            inference_threads=self.job_params.get("inference_threads", 4),
-            postprocessing_threads=self.job_params.get("postprocessing_threads", 2),
             inference_queue_maxsize=self.job_params.get("inference_queue_maxsize", 5000),
             postproc_queue_maxsize=self.job_params.get("postproc_queue_maxsize", 5000),
             output_queue_maxsize=self.job_params.get("output_queue_maxsize", 5000),
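The four thread-count arguments are no longer read from `job_params`, so `consumer_threads`, `producer_threads`, `inference_threads`, and `postprocessing_threads` set in job parameters are now silently ignored; thread sizing is presumably decided inside `StreamingPipeline` itself. What callers still control are the three queue bounds, which is where backpressure comes from in a staged pipeline: a full hand-off queue blocks the upstream stage. A minimal sketch of that mechanism using the standard library:

    import queue

    # A bounded hand-off queue like inference_queue_maxsize configures:
    # put() blocks once maxsize items are waiting, throttling the producer.
    inference_queue: queue.Queue = queue.Queue(maxsize=5000)

    inference_queue.put(b"frame-0")  # would block if 5000 items were queued
    item = inference_queue.get()     # frees a slot, unblocking producers
    inference_queue.task_done()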