slvehicle 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,2 @@
1
+ include README.md
2
+ recursive-include slvehicle/models *.onnx *.pt
@@ -0,0 +1,46 @@
1
+ Metadata-Version: 2.4
2
+ Name: slvehicle
3
+ Version: 0.1.0
4
+ Summary: Sri Lanka Vehicle Detection & Segmentation (slvy)
5
+ Author: SriLankaVehicleAI
6
+ Requires-Python: >=3.8
7
+ Description-Content-Type: text/markdown
8
+ Requires-Dist: numpy>=1.23
9
+ Requires-Dist: opencv-python>=4.7
10
+ Requires-Dist: onnxruntime>=1.16
11
+ Provides-Extra: gpu
12
+ Requires-Dist: onnxruntime-gpu>=1.16; extra == "gpu"
13
+ Provides-Extra: torch
14
+ Requires-Dist: torch>=2.0; extra == "torch"
15
+ Requires-Dist: ultralytics>=8.1; extra == "torch"
16
+ Dynamic: author
17
+ Dynamic: requires-python
18
+
19
+ # slvehicle (slvy) — Sri Lanka Vehicle AI
20
+
21
+ Vehicle detection & instance segmentation library for Sri Lankan traffic.
22
+
23
+ **Classes:** bike, bus, car, lorry, tuktuk, van
24
+
25
+ ## Install
26
+
27
+ ```bash
28
+ pip install slvehicle # CPU
29
+ pip install "slvehicle[gpu]"   # GPU (CUDA); quotes needed in zsh
30
+ ```
31
+
32
+ ## Usage
33
+
34
+ ```python
35
+ from slvehicle import VehicleDetector
36
+
37
+ det = VehicleDetector() # auto device
38
+ result = det.detect('image.jpg')
39
+ det.detect_and_save('input_folder/', 'output_folder/')
40
+ ```
41
+
42
+ ## Performance
43
+
44
+ - mAP50: 97.3%
45
+ - Model size: 5.6 MB (FP16 ONNX)
46
+ - Speed: ~15 ms (GPU), ~80 ms (CPU)
@@ -0,0 +1,28 @@
1
+ # slvehicle (slvy) — Sri Lanka Vehicle AI
2
+
3
+ Vehicle detection & instance segmentation library for Sri Lankan traffic.
4
+
5
+ **Classes:** bike, bus, car, lorry, tuktuk, van
6
+
7
+ ## Install
8
+
9
+ ```bash
10
+ pip install slvehicle # CPU
11
+ pip install "slvehicle[gpu]"   # GPU (CUDA); quotes needed in zsh
12
+ ```
13
+
14
+ ## Usage
15
+
16
+ ```python
17
+ from slvehicle import VehicleDetector
18
+
19
+ det = VehicleDetector() # auto device
20
+ result = det.detect('image.jpg')
21
+ det.detect_and_save('input_folder/', 'output_folder/')
22
+ ```
23
+
24
+ ## Performance
25
+
26
+ - mAP50: 97.3%
27
+ - Model size: 5.6 MB (FP16 ONNX)
28
+ - Speed: ~15 ms (GPU), ~80 ms (CPU)
@@ -0,0 +1,19 @@
1
+ [build-system]
2
+ requires = ["setuptools>=61.0", "wheel"]
3
+ build-backend = "setuptools.build_meta"
4
+
5
+ [project]
6
+ name = "slvehicle"
7
+ version = "0.1.0"
8
+ description = "Sri Lanka Vehicle Detection & Segmentation (slvy)"
9
+ readme = "README.md"
10
+ requires-python = ">=3.8"
11
+ dependencies = [
12
+ "numpy>=1.23",
13
+ "opencv-python>=4.7",
14
+ "onnxruntime>=1.16",
15
+ ]
16
+
17
+ [project.optional-dependencies]
18
+ gpu = ["onnxruntime-gpu>=1.16"]
19
+ torch = ["torch>=2.0", "ultralytics>=8.1"]
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+
@@ -0,0 +1,21 @@
1
from setuptools import setup, find_packages

# NOTE(review): this metadata duplicates pyproject.toml (name, version,
# dependencies, extras); keep the two in sync or drop one of them.
setup(
    name="slvehicle",
    version="0.1.0",
    description="Sri Lanka Vehicle Detection & Segmentation (slvy)",
    author="SriLankaVehicleAI",
    packages=find_packages(),
    # Ship the bundled model weights inside the wheel/sdist
    # (see also MANIFEST.in's recursive-include for the sdist).
    include_package_data=True,
    package_data={"slvehicle": ["models/*.onnx", "models/*.pt"]},
    install_requires=[
        "numpy>=1.23",
        "opencv-python>=4.7",
        "onnxruntime>=1.16",
    ],
    extras_require={
        # GPU users install onnxruntime-gpu in addition to the CPU runtime.
        "gpu": ["onnxruntime-gpu>=1.16"],
        # Optional training/export stack.
        "torch": ["torch>=2.0", "ultralytics>=8.1"],
    },
    python_requires=">=3.8",
)
@@ -0,0 +1,18 @@
1
+ """
2
+ slvehicle — Sri Lanka Vehicle Detection Library
3
+ ================================================
4
+ Quick start:
5
+
6
+ from slvehicle import VehicleDetector
7
+
8
+ det = VehicleDetector()
9
+ results = det.detect("car.jpg")
10
+ det.detect_and_save("input_folder", "output_folder")
11
+ """
12
+
13
+ __version__ = "0.1.0"
14
+
15
+ from .detector import VehicleDetector
16
+ from .visualizer import Visualizer
17
+
18
+ __all__ = ["VehicleDetector", "Visualizer", "__version__"]
@@ -0,0 +1,28 @@
1
+ """Global configuration constants for slvehicle."""
2
+
3
+ from pathlib import Path
4
+
5
+ PACKAGE_DIR = Path(__file__).resolve().parent
6
+ MODEL_DIR = PACKAGE_DIR / "models"
7
+
8
+ DEFAULT_ONNX = MODEL_DIR / "yolo11n_seg_fp16.onnx"
9
+ DEFAULT_PT = MODEL_DIR / "yolo11n_seg.pt"
10
+
11
+ CLASS_NAMES = ["bike", "bus", "car", "lorry", "tuktuk", "van"]
12
+ NUM_CLASSES = len(CLASS_NAMES)
13
+
14
+ INPUT_SIZE = 640
15
+ CONF_THRES = 0.25
16
+ IOU_THRES = 0.45
17
+ MAX_DET = 300
18
+
19
+ CLASS_COLORS = {
20
+ "bike": (255, 56, 56),
21
+ "bus": (56, 255, 56),
22
+ "car": (56, 56, 255),
23
+ "lorry": (255, 157, 56),
24
+ "tuktuk": (255, 56, 255),
25
+ "van": (56, 255, 255),
26
+ }
27
+
28
+ IMG_EXTS = (".jpg", ".jpeg", ".png", ".bmp", ".tif", ".tiff", ".webp")
@@ -0,0 +1,95 @@
1
+ """Main user-facing API."""
2
+
3
+ import os
4
+ import cv2
5
+ import time
6
+ from pathlib import Path
7
+
8
+ from .config import DEFAULT_ONNX, CLASS_NAMES, IMG_EXTS, CONF_THRES, IOU_THRES
9
+ from .engine import get_engine
10
+ from .preprocessor import preprocess
11
+ from .postprocessor import postprocess
12
+ from .visualizer import Visualizer
13
+
14
+
15
class VehicleDetector:
    """High-level detection API: load a model once, run on images or folders.

    Parameters
    ----------
    model_path : str | Path | None
        Path to an ONNX model; defaults to the bundled FP16 YOLO11n-seg.
    device : str
        "auto", "cpu", or "gpu"/"cuda" — forwarded to the engine factory.
    conf, iou : float
        Confidence and NMS IoU thresholds used during postprocessing.

    Raises
    ------
    FileNotFoundError
        If the model file does not exist.
    """

    def __init__(self, model_path=None, device="auto",
                 conf=CONF_THRES, iou=IOU_THRES):
        self.model_path = Path(model_path) if model_path else DEFAULT_ONNX
        if not self.model_path.exists():
            raise FileNotFoundError(f"Model not found: {self.model_path}")
        self.engine = get_engine(self.model_path, device=device)
        self.conf = conf
        self.iou = iou
        self.visualizer = Visualizer()
        print(f"[slvy] VehicleDetector ready on '{self.engine.device}'.")

    def _load_image(self, image):
        """Accept a path or an already-decoded BGR array; raise on bad paths."""
        if isinstance(image, (str, Path)):
            img = cv2.imread(str(image))
            if img is None:
                raise ValueError(f"Cannot read image: {image}")
            return img
        return image

    def detect(self, image):
        """Run detection on one image (file path or BGR ndarray).

        Returns the postprocessed result dict with keys
        "boxes", "scores", "classes", "masks".
        """
        img = self._load_image(image)
        tensor, meta = preprocess(img)
        outputs = self.engine.infer(tensor)
        return postprocess(outputs, meta, self.conf, self.iou)

    def detect_and_show(self, image, window="slvy"):
        """Detect, draw the result, and show it in an OpenCV window.

        Blocks until a key is pressed; returns the result dict.
        """
        # BUGFIX: unreadable paths previously produced img=None here (no
        # check, unlike detect()) and crashed deep inside preprocessing;
        # now they raise a clear ValueError via _load_image.
        img = self._load_image(image)
        result = self.detect(img)
        vis = self.visualizer.draw(img, result)
        cv2.imshow(window, vis)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
        return result

    def detect_folder(self, input_dir):
        """Detect every image in *input_dir*; return {filename: result}.

        Per-image failures are logged as warnings and skipped, not raised.
        """
        input_dir = Path(input_dir)
        results = {}
        files = [p for p in input_dir.iterdir()
                 if p.suffix.lower() in IMG_EXTS]
        print(f"[slvy] Processing {len(files)} images from {input_dir}")
        for p in files:
            try:
                results[p.name] = self.detect(p)
            except Exception as e:
                print(f" [WARN] {p.name}: {e}")
        return results

    def detect_and_save(self, input_dir, output_dir, save_viz=True):
        """Detect every image in *input_dir* and write results to *output_dir*.

        For each image, writes an annotated copy (when save_viz=True) and a
        .txt label file with lines "name score x1 y1 x2 y2".

        Returns a list of (filename, num_detections) tuples.
        """
        input_dir = Path(input_dir)
        output_dir = Path(output_dir)
        output_dir.mkdir(parents=True, exist_ok=True)

        files = [p for p in input_dir.iterdir()
                 if p.suffix.lower() in IMG_EXTS]
        print(f"[slvy] Processing {len(files)} images -> {output_dir}")

        summary = []
        t0 = time.time()
        for p in files:
            img = cv2.imread(str(p))
            if img is None:
                # Best-effort batch semantics: skip unreadable files.
                continue
            result = self.detect(img)
            if save_viz:
                vis = self.visualizer.draw(img, result)
                cv2.imwrite(str(output_dir / p.name), vis)
            # txt label file
            txt = output_dir / (p.stem + ".txt")
            with open(txt, "w") as f:
                for i in range(len(result["boxes"])):
                    x1, y1, x2, y2 = result["boxes"][i]
                    cid = int(result["classes"][i])
                    sc = float(result["scores"][i])
                    f.write(f"{CLASS_NAMES[cid]} {sc:.4f} "
                            f"{x1:.1f} {y1:.1f} {x2:.1f} {y2:.1f}\n")
            summary.append((p.name, len(result["boxes"])))

        dt = time.time() - t0
        print(f"[slvy] Done in {dt:.2f}s ({dt/max(len(files),1)*1000:.1f} ms/img)")
        return summary
@@ -0,0 +1,7 @@
1
+ """Inference engines."""
2
+
3
+ from .auto_engine import get_engine
4
+ from .cpu_engine import CPUEngine
5
+ from .gpu_engine import GPUEngine
6
+
7
+ __all__ = ["get_engine", "CPUEngine", "GPUEngine"]
@@ -0,0 +1,20 @@
1
+ """Auto engine selection."""
2
+
3
+ import onnxruntime as ort
4
+ from .cpu_engine import CPUEngine
5
+ from .gpu_engine import GPUEngine
6
+
7
+
8
def get_engine(model_path, device="auto"):
    """Build an inference engine for *model_path*.

    device: "cpu" forces the CPU backend, "gpu"/"cuda" forces the CUDA
    backend, and anything else ("auto") prefers CUDA when onnxruntime
    reports it available, falling back to CPU if GPU setup fails.
    """
    choice = device.lower()
    if choice == "cpu":
        return CPUEngine(model_path)
    if choice in ("gpu", "cuda"):
        return GPUEngine(model_path)
    # auto
    if "CUDAExecutionProvider" in ort.get_available_providers():
        try:
            return GPUEngine(model_path)
        except Exception as e:
            print(f"[slvy] GPU failed ({e}); falling back to CPU.")
    return CPUEngine(model_path)
@@ -0,0 +1,15 @@
1
+ """Base engine interface."""
2
+
3
+ from abc import ABC, abstractmethod
4
+
5
+
6
class BaseEngine(ABC):
    """Abstract interface shared by all inference backends."""

    @abstractmethod
    def infer(self, input_tensor):
        """Run inference; return list of numpy arrays."""
        ...

    @property
    @abstractmethod
    def device(self):
        """Short device name the backend runs on."""
        ...
@@ -0,0 +1,25 @@
1
+ """ONNX Runtime CPU engine."""
2
+
3
+ import onnxruntime as ort
4
+ from .base import BaseEngine
5
+
6
+
7
class CPUEngine(BaseEngine):
    """ONNX Runtime session pinned to the CPU execution provider."""

    def __init__(self, model_path):
        self.model_path = str(model_path)
        opts = ort.SessionOptions()
        # Enable the full graph-optimization pipeline for best CPU latency.
        opts.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
        self.session = ort.InferenceSession(
            self.model_path,
            sess_options=opts,
            providers=["CPUExecutionProvider"],
        )
        self.input_name = self.session.get_inputs()[0].name
        print(f"[slvy] CPUEngine loaded: {self.model_path}")

    def infer(self, input_tensor):
        """Run the session; returns every model output as a numpy array."""
        feed = {self.input_name: input_tensor}
        return self.session.run(None, feed)

    @property
    def device(self):
        return "cpu"
@@ -0,0 +1,23 @@
1
+ """ONNX Runtime GPU engine (CUDA)."""
2
+
3
+ import onnxruntime as ort
4
+ from .base import BaseEngine
5
+
6
+
7
class GPUEngine(BaseEngine):
    """ONNX Runtime session that requires the CUDA execution provider."""

    def __init__(self, model_path):
        self.model_path = str(model_path)
        self.session = ort.InferenceSession(
            self.model_path,
            providers=["CUDAExecutionProvider", "CPUExecutionProvider"],
        )
        # ORT silently drops unavailable providers; verify CUDA really loaded.
        if "CUDAExecutionProvider" not in self.session.get_providers():
            raise RuntimeError("CUDAExecutionProvider not available.")
        self.input_name = self.session.get_inputs()[0].name
        print(f"[slvy] GPUEngine loaded: {self.model_path}")

    def infer(self, input_tensor):
        """Run the session; returns every model output as a numpy array."""
        return self.session.run(None, {self.input_name: input_tensor})

    @property
    def device(self):
        return "cuda"
@@ -0,0 +1 @@
1
+ """Bundled models."""
@@ -0,0 +1,142 @@
1
+ """Postprocessing for YOLO11-seg ONNX outputs."""
2
+
3
+ import cv2
4
+ import numpy as np
5
+ from .config import CONF_THRES, IOU_THRES, MAX_DET, NUM_CLASSES, INPUT_SIZE
6
+
7
+
8
def xywh2xyxy(x):
    """Convert boxes from (cx, cy, w, h) to (x1, y1, x2, y2); input untouched."""
    half_w = x[..., 2] / 2
    half_h = x[..., 3] / 2
    y = np.copy(x)
    y[..., 0] = x[..., 0] - half_w
    y[..., 1] = x[..., 1] - half_h
    y[..., 2] = x[..., 0] + half_w
    y[..., 3] = x[..., 1] + half_h
    return y
15
+
16
+
17
def nms_numpy(boxes, scores, iou_thres=IOU_THRES):
    """Greedy non-maximum suppression.

    boxes: (n, 4) xyxy array; scores: (n,) confidences.
    Returns the indices of the boxes to keep, best-scoring first.
    """
    if len(boxes) == 0:
        return []
    x1, y1, x2, y2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
    areas = (x2 - x1) * (y2 - y1)
    remaining = scores.argsort()[::-1]  # candidate indices, highest score first
    keep = []
    while remaining.size > 0:
        best = remaining[0]
        keep.append(best)
        if remaining.size == 1:
            break
        rest = remaining[1:]
        # Intersection of the winning box with every remaining candidate.
        ix1 = np.maximum(x1[best], x1[rest])
        iy1 = np.maximum(y1[best], y1[rest])
        ix2 = np.minimum(x2[best], x2[rest])
        iy2 = np.minimum(y2[best], y2[rest])
        inter = np.maximum(0.0, ix2 - ix1) * np.maximum(0.0, iy2 - iy1)
        iou = inter / (areas[best] + areas[rest] - inter + 1e-9)
        # Drop everything overlapping the winner above the threshold.
        remaining = rest[iou <= iou_thres]
    return keep
40
+
41
+
42
def sigmoid(x):
    """Element-wise logistic function 1 / (1 + e**-x).

    FIX: the argument is clipped to [-80, 80] before exponentiation so that
    large-magnitude negative inputs cannot overflow np.exp — float32
    overflows around e**88, which previously emitted RuntimeWarnings and
    produced inf intermediates. Clipping only affects inputs whose true
    sigmoid is already 0.0/1.0 to ~1e-35 precision.
    """
    return 1.0 / (1.0 + np.exp(-np.clip(x, -80.0, 80.0)))
44
+
45
+
46
def process_mask(protos, masks_in, bboxes, shape):
    """Combine mask coeffs with prototypes -> binary masks at input size.

    Args:
        protos: (c, mh, mw) prototype masks from the model's mask head.
        masks_in: (n, c) per-detection mask coefficients.
        bboxes: (n, 4) xyxy boxes in network-input coordinates.
        shape: (height, width) of the network input.

    Returns:
        (n, height, width) uint8 binary masks, each zeroed outside its box;
        an empty (0, h, w) array when there are no detections.
    """
    c, mh, mw = protos.shape
    # Linear combination of prototypes per detection, squashed to [0, 1].
    masks = sigmoid(masks_in @ protos.reshape(c, -1)).reshape(-1, mh, mw)
    # Upsample to input size
    masks = np.stack([cv2.resize(m, (shape[1], shape[0]),
                                 interpolation=cv2.INTER_LINEAR)
                      for m in masks])
    # Crop to bboxes
    out = []
    for i, b in enumerate(bboxes):
        x1, y1, x2, y2 = b.astype(int)
        crop = np.zeros_like(masks[i], dtype=np.uint8)
        # Clamp the box to image bounds before slicing.
        x1 = max(0, x1); y1 = max(0, y1)
        x2 = min(shape[1], x2); y2 = min(shape[0], y2)
        if x2 > x1 and y2 > y1:
            # Threshold at 0.5; everything outside the box stays zero.
            crop[y1:y2, x1:x2] = (masks[i][y1:y2, x1:x2] > 0.5).astype(np.uint8)
        out.append(crop)
    return np.array(out) if out else np.zeros((0, shape[0], shape[1]), dtype=np.uint8)
65
+
66
+
67
def scale_to_original(boxes_xyxy, masks, meta):
    """Undo letterbox -> map boxes/masks to original image.

    Args:
        boxes_xyxy: (n, 4) boxes in letterboxed input coordinates.
            NOTE: modified in place (shifted/scaled/clipped).
        masks: (n, H, W) binary masks at network-input size.
        meta: dict from preprocess() with "ratio", "pad" (dx, dy) and
            "orig_shape" (h, w).

    Returns:
        (boxes, masks) mapped to original-image coordinates/resolution.
    """
    ratio = meta["ratio"]
    dx, dy = meta["pad"]
    oh, ow = meta["orig_shape"]

    # Remove the padding offset, rescale, then clip to the original frame.
    boxes_xyxy[:, [0, 2]] -= dx
    boxes_xyxy[:, [1, 3]] -= dy
    boxes_xyxy /= ratio
    boxes_xyxy[:, [0, 2]] = boxes_xyxy[:, [0, 2]].clip(0, ow)
    boxes_xyxy[:, [1, 3]] = boxes_xyxy[:, [1, 3]].clip(0, oh)

    out_masks = []
    for m in masks:
        # Crop padding then resize back
        # ("or None" makes a zero-pad slice run to the end instead of to 0).
        m_crop = m[int(dy):m.shape[0]-int(dy) or None,
                   int(dx):m.shape[1]-int(dx) or None]
        if m_crop.size == 0:
            out_masks.append(np.zeros((oh, ow), dtype=np.uint8))
            continue
        # Nearest-neighbor keeps the mask binary after resizing.
        m_resized = cv2.resize(m_crop, (ow, oh), interpolation=cv2.INTER_NEAREST)
        out_masks.append(m_resized.astype(np.uint8))
    return boxes_xyxy, np.array(out_masks) if out_masks else np.zeros((0, oh, ow), dtype=np.uint8)
90
+
91
+
92
def postprocess(outputs, meta, conf_thres=CONF_THRES, iou_thres=IOU_THRES):
    """
    Decode raw model outputs into final detections on the original image.

    YOLO11-seg ONNX outputs:
        outputs[0]: (1, 4+nc+32, N) detections
        outputs[1]: (1, 32, mh, mw) mask prototypes

    Args:
        outputs: raw arrays returned by the inference engine.
        meta: letterbox metadata dict produced by preprocess().
        conf_thres: minimum class confidence to keep a candidate.
        iou_thres: NMS IoU threshold.

    Returns:
        dict with "boxes" (n, 4 xyxy), "scores" (n,), "classes" (n, int)
        and "masks" (n, orig_h, orig_w uint8), all in original-image
        coordinates.
    """
    preds = outputs[0]
    protos = outputs[1][0]

    preds = np.squeeze(preds, 0).T  # (N, 4+nc+32)
    nc = NUM_CLASSES
    boxes_xywh = preds[:, :4]
    cls_scores = preds[:, 4:4 + nc]
    mask_coeffs = preds[:, 4 + nc:]

    # Best class per candidate.
    cls_ids = cls_scores.argmax(axis=1)
    confs = cls_scores.max(axis=1)

    # Confidence filter first, so NMS runs on far fewer candidates.
    keep_mask = confs > conf_thres
    boxes_xywh = boxes_xywh[keep_mask]
    confs = confs[keep_mask]
    cls_ids = cls_ids[keep_mask]
    mask_coeffs = mask_coeffs[keep_mask]

    if len(boxes_xywh) == 0:
        # No detections: return empty, correctly-shaped arrays.
        return {
            "boxes": np.zeros((0, 4)),
            "scores": np.zeros((0,)),
            "classes": np.zeros((0,), dtype=int),
            "masks": np.zeros((0, meta["orig_shape"][0], meta["orig_shape"][1]),
                              dtype=np.uint8),
        }

    boxes_xyxy = xywh2xyxy(boxes_xywh)
    keep = nms_numpy(boxes_xyxy, confs, iou_thres)[:MAX_DET]

    boxes_xyxy = boxes_xyxy[keep]
    confs = confs[keep]
    cls_ids = cls_ids[keep]
    mask_coeffs = mask_coeffs[keep]

    # Build masks at network-input resolution; boxes are copied because
    # process_mask clamps its own copy of the box values.
    masks = process_mask(protos, mask_coeffs, boxes_xyxy.copy(),
                         (INPUT_SIZE, INPUT_SIZE))
    boxes_xyxy, masks = scale_to_original(boxes_xyxy, masks, meta)

    return {
        "boxes": boxes_xyxy,
        "scores": confs,
        "classes": cls_ids,
        "masks": masks,
    }
@@ -0,0 +1,39 @@
1
+ """Image preprocessing: letterbox resize + normalize."""
2
+
3
+ import cv2
4
+ import numpy as np
5
+ from .config import INPUT_SIZE
6
+
7
+
8
def letterbox(img, new_size=INPUT_SIZE, color=(114, 114, 114)):
    """Resize image with unchanged aspect ratio using padding.

    Returns (padded_image, scale_ratio, (pad_left, pad_top)).
    """
    h, w = img.shape[:2]
    scale = min(new_size / h, new_size / w)
    target = (int(round(w * scale)), int(round(h * scale)))

    if (w, h) != target:
        img = cv2.resize(img, target, interpolation=cv2.INTER_LINEAR)

    pad_w = (new_size - target[0]) / 2
    pad_h = (new_size - target[1]) / 2
    # The +/-0.1 bias splits an odd pad as (floor, ceil) after rounding.
    top, bottom = int(round(pad_h - 0.1)), int(round(pad_h + 0.1))
    left, right = int(round(pad_w - 0.1)), int(round(pad_w + 0.1))
    img = cv2.copyMakeBorder(img, top, bottom, left, right,
                             cv2.BORDER_CONSTANT, value=color)
    return img, scale, (left, top)
25
+
26
+
27
def preprocess(img_bgr, size=INPUT_SIZE):
    """BGR image -> normalized CHW float32 tensor + meta."""
    padded, ratio, (pad_x, pad_y) = letterbox(img_bgr, size)
    rgb = cv2.cvtColor(padded, cv2.COLOR_BGR2RGB)
    # HWC uint8 -> CHW float32 in [0, 1], plus a leading batch axis.
    tensor = np.transpose(rgb.astype(np.float32) / 255.0, (2, 0, 1))[None]
    meta = {
        "ratio": ratio,
        "pad": (pad_x, pad_y),
        "orig_shape": img_bgr.shape[:2],
    }
    return tensor, meta
@@ -0,0 +1,38 @@
1
+ """Drawing utilities for detection results."""
2
+
3
+ import cv2
4
+ import numpy as np
5
+ from .config import CLASS_NAMES, CLASS_COLORS
6
+
7
+
8
class Visualizer:
    """Renders detection results (masks, boxes, labels) onto BGR images."""

    def __init__(self, alpha=0.5):
        # Blend weight for the mask overlay (0 = invisible, 1 = opaque).
        self.alpha = alpha

    def draw(self, img_bgr, result):
        """Return a copy of *img_bgr* with masks, boxes and labels drawn."""
        canvas = img_bgr.copy()
        boxes = result["boxes"]
        scores = result["scores"]
        class_ids = result["classes"]
        masks = result["masks"]

        # Paint all masks on one layer, then alpha-blend it in a single pass.
        layer = canvas.copy()
        for idx, mask in enumerate(masks):
            layer[mask > 0] = CLASS_COLORS[CLASS_NAMES[int(class_ids[idx])]]
        canvas = cv2.addWeighted(layer, self.alpha, canvas, 1 - self.alpha, 0)

        # Boxes with filled label banners above the top-left corner.
        for idx, box in enumerate(boxes):
            x1, y1, x2, y2 = map(int, box)
            name = CLASS_NAMES[int(class_ids[idx])]
            color = CLASS_COLORS[name]
            cv2.rectangle(canvas, (x1, y1), (x2, y2), color, 2)
            label = f"{name} {scores[idx]:.2f}"
            (tw, th), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
            cv2.rectangle(canvas, (x1, y1 - th - 6), (x1 + tw + 4, y1), color, -1)
            cv2.putText(canvas, label, (x1 + 2, y1 - 4),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1,
                        cv2.LINE_AA)
        return canvas
@@ -0,0 +1,46 @@
1
+ Metadata-Version: 2.4
2
+ Name: slvehicle
3
+ Version: 0.1.0
4
+ Summary: Sri Lanka Vehicle Detection & Segmentation (slvy)
5
+ Author: SriLankaVehicleAI
6
+ Requires-Python: >=3.8
7
+ Description-Content-Type: text/markdown
8
+ Requires-Dist: numpy>=1.23
9
+ Requires-Dist: opencv-python>=4.7
10
+ Requires-Dist: onnxruntime>=1.16
11
+ Provides-Extra: gpu
12
+ Requires-Dist: onnxruntime-gpu>=1.16; extra == "gpu"
13
+ Provides-Extra: torch
14
+ Requires-Dist: torch>=2.0; extra == "torch"
15
+ Requires-Dist: ultralytics>=8.1; extra == "torch"
16
+ Dynamic: author
17
+ Dynamic: requires-python
18
+
19
+ # slvehicle (slvy) — Sri Lanka Vehicle AI
20
+
21
+ Vehicle detection & instance segmentation library for Sri Lankan traffic.
22
+
23
+ **Classes:** bike, bus, car, lorry, tuktuk, van
24
+
25
+ ## Install
26
+
27
+ ```bash
28
+ pip install slvehicle # CPU
29
+ pip install slvehicle[gpu] # GPU (CUDA)
30
+ ```
31
+
32
+ ## Usage
33
+
34
+ ```python
35
+ from slvehicle import VehicleDetector
36
+
37
+ det = VehicleDetector() # auto device
38
+ result = det.detect('image.jpg')
39
+ det.detect_and_save('input_folder/', 'output_folder/')
40
+ ```
41
+
42
+ ## Performance
43
+
44
+ - mAP50: 97.3%
45
+ - Model size: 5.6 MB (FP16 ONNX)
46
+ - Speed: ~15 ms (GPU), ~80 ms (CPU)
@@ -0,0 +1,24 @@
1
+ MANIFEST.in
2
+ README.md
3
+ pyproject.toml
4
+ setup.py
5
+ slvehicle/__init__.py
6
+ slvehicle/config.py
7
+ slvehicle/detector.py
8
+ slvehicle/postprocessor.py
9
+ slvehicle/preprocessor.py
10
+ slvehicle/visualizer.py
11
+ slvehicle.egg-info/PKG-INFO
12
+ slvehicle.egg-info/SOURCES.txt
13
+ slvehicle.egg-info/dependency_links.txt
14
+ slvehicle.egg-info/requires.txt
15
+ slvehicle.egg-info/top_level.txt
16
+ slvehicle/engine/__init__.py
17
+ slvehicle/engine/auto_engine.py
18
+ slvehicle/engine/base.py
19
+ slvehicle/engine/cpu_engine.py
20
+ slvehicle/engine/gpu_engine.py
21
+ slvehicle/models/__init__.py
22
+ slvehicle/models/yolo11n_seg.pt
23
+ slvehicle/models/yolo11n_seg_fp16.onnx
24
+ tests/test_library.py
@@ -0,0 +1,10 @@
1
+ numpy>=1.23
2
+ opencv-python>=4.7
3
+ onnxruntime>=1.16
4
+
5
+ [gpu]
6
+ onnxruntime-gpu>=1.16
7
+
8
+ [torch]
9
+ torch>=2.0
10
+ ultralytics>=8.1
@@ -0,0 +1 @@
1
+ slvehicle
@@ -0,0 +1,33 @@
1
+ """Quick smoke test for the slvehicle library."""
2
+
3
+ import sys
4
+ from pathlib import Path
5
+
6
+ # Allow running before pip install
7
+ ROOT = Path(__file__).resolve().parent.parent
8
+ sys.path.insert(0, str(ROOT))
9
+
10
+ from slvehicle import VehicleDetector
11
+
12
+
13
def main():
    """Smoke-test: initialize a detector, then optionally run on samples."""
    banner = "=" * 60
    print(banner)
    print(" slvy library test")
    print(banner)

    det = VehicleDetector(device="auto")
    print(f" Device: {det.engine.device}")
    print(f" Model : {det.model_path}")
    print(" [OK] Detector initialized.")

    # If a sample image exists, run on it
    sample = ROOT.parent / "data" / "samples"
    if not sample.exists():
        print(" (no data/samples folder — skipping batch test)")
        return
    results = det.detect_folder(sample)
    print(f" Processed {len(results)} samples.")
30
+
31
+
32
+ if __name__ == "__main__":
33
+ main()