eye-cv 1.0.0.tar.gz → 1.0.1.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (113)
  1. {eye_cv-1.0.0 → eye_cv-1.0.1}/FEATURES_V2.md +8 -8
  2. {eye_cv-1.0.0 → eye_cv-1.0.1}/IMPROVEMENTS.md +12 -12
  3. eye_cv-1.0.1/PACKAGE_FEATURES.md +619 -0
  4. {eye_cv-1.0.0 → eye_cv-1.0.1}/PKG-INFO +3 -3
  5. {eye_cv-1.0.0 → eye_cv-1.0.1}/README.md +2 -2
  6. {eye_cv-1.0.0 → eye_cv-1.0.1}/USAGE.md +10 -10
  7. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/annotators/line.py +1 -1
  8. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/detection/auto_convert.py +1 -1
  9. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/draw/color.py +1 -1
  10. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye_cv.egg-info/PKG-INFO +3 -3
  11. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye_cv.egg-info/SOURCES.txt +1 -0
  12. {eye_cv-1.0.0 → eye_cv-1.0.1}/setup.py +1 -1
  13. {eye_cv-1.0.0 → eye_cv-1.0.1}/tests/test_v2.py +1 -1
  14. {eye_cv-1.0.0 → eye_cv-1.0.1}/DELIVERY_SUMMARY.md +0 -0
  15. {eye_cv-1.0.0 → eye_cv-1.0.1}/INSTALL_INTEGRATION.md +0 -0
  16. {eye_cv-1.0.0 → eye_cv-1.0.1}/LICENSE +0 -0
  17. {eye_cv-1.0.0 → eye_cv-1.0.1}/MANIFEST.in +0 -0
  18. {eye_cv-1.0.0 → eye_cv-1.0.1}/PUBLISHING.md +0 -0
  19. {eye_cv-1.0.0 → eye_cv-1.0.1}/QUICK_REFERENCE.md +0 -0
  20. {eye_cv-1.0.0 → eye_cv-1.0.1}/SECURITY.md +0 -0
  21. {eye_cv-1.0.0 → eye_cv-1.0.1}/TRACKER_GUIDE.md +0 -0
  22. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/__init__.py +0 -0
  23. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/__init___supervision_original.py +0 -0
  24. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/annotators/__init__.py +0 -0
  25. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/annotators/base.py +0 -0
  26. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/annotators/core.py +0 -0
  27. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/annotators/modern.py +0 -0
  28. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/annotators/trace.py +0 -0
  29. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/annotators/utils.py +0 -0
  30. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/assets/__init__.py +0 -0
  31. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/assets/downloader.py +0 -0
  32. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/assets/list.py +0 -0
  33. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/classification/__init__.py +0 -0
  34. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/classification/core.py +0 -0
  35. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/config.py +0 -0
  36. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/core/__init__.py +0 -0
  37. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/core/trackers/__init__.py +0 -0
  38. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/core/trackers/botsort_tracker.py +0 -0
  39. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/core/trackers/bytetrack_tracker.py +0 -0
  40. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/core/trackers/sort_tracker.py +0 -0
  41. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/core/tracking.py +0 -0
  42. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/dataset/__init__.py +0 -0
  43. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/dataset/core.py +0 -0
  44. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/dataset/formats/__init__.py +0 -0
  45. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/dataset/formats/coco.py +0 -0
  46. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/dataset/formats/pascal_voc.py +0 -0
  47. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/dataset/formats/yolo.py +0 -0
  48. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/dataset/utils.py +0 -0
  49. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/detection/__init__.py +0 -0
  50. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/detection/core.py +0 -0
  51. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/detection/detections_enhanced.py +0 -0
  52. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/detection/line_zone.py +0 -0
  53. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/detection/lmm.py +0 -0
  54. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/detection/overlap_filter.py +0 -0
  55. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/detection/tools/__init__.py +0 -0
  56. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/detection/tools/csv_sink.py +0 -0
  57. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/detection/tools/inference_slicer.py +0 -0
  58. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/detection/tools/json_sink.py +0 -0
  59. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/detection/tools/polygon_zone.py +0 -0
  60. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/detection/tools/smoother.py +0 -0
  61. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/detection/tools/smoothing.py +0 -0
  62. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/detection/tools/smoothing_config.py +0 -0
  63. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/detection/tools/transformers.py +0 -0
  64. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/detection/utils.py +0 -0
  65. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/draw/__init__.py +0 -0
  66. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/draw/utils.py +0 -0
  67. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/filters.py +0 -0
  68. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/geometry/__init__.py +0 -0
  69. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/geometry/core.py +0 -0
  70. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/geometry/utils.py +0 -0
  71. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/keypoint/__init__.py +0 -0
  72. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/keypoint/annotators.py +0 -0
  73. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/keypoint/core.py +0 -0
  74. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/keypoint/skeletons.py +0 -0
  75. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/metrics/__init__.py +0 -0
  76. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/metrics/core.py +0 -0
  77. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/metrics/detection.py +0 -0
  78. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/metrics/f1_score.py +0 -0
  79. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/metrics/mean_average_precision.py +0 -0
  80. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/metrics/mean_average_recall.py +0 -0
  81. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/metrics/precision.py +0 -0
  82. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/metrics/recall.py +0 -0
  83. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/metrics/utils/__init__.py +0 -0
  84. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/metrics/utils/object_size.py +0 -0
  85. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/metrics/utils/utils.py +0 -0
  86. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/py.typed +0 -0
  87. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/quick.py +0 -0
  88. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/tracker/__init__.py +0 -0
  89. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/tracker/byte_tracker/__init__.py +0 -0
  90. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/tracker/byte_tracker/core.py +0 -0
  91. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/tracker/byte_tracker/kalman_filter.py +0 -0
  92. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/tracker/byte_tracker/matching.py +0 -0
  93. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/tracker/byte_tracker/single_object_track.py +0 -0
  94. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/tracker/byte_tracker/utils.py +0 -0
  95. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/utils/__init__.py +0 -0
  96. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/utils/conversion.py +0 -0
  97. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/utils/file.py +0 -0
  98. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/utils/image.py +0 -0
  99. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/utils/internal.py +0 -0
  100. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/utils/iterables.py +0 -0
  101. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/utils/notebook.py +0 -0
  102. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/utils/video.py +0 -0
  103. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/utils_eye/__init__.py +0 -0
  104. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/utils_eye/geometry.py +0 -0
  105. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/utils_eye/nms.py +0 -0
  106. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/validators/__init__.py +0 -0
  107. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye/web.py +0 -0
  108. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye_cv.egg-info/dependency_links.txt +0 -0
  109. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye_cv.egg-info/requires.txt +0 -0
  110. {eye_cv-1.0.0 → eye_cv-1.0.1}/eye_cv.egg-info/top_level.txt +0 -0
  111. {eye_cv-1.0.0 → eye_cv-1.0.1}/pyproject.toml +0 -0
  112. {eye_cv-1.0.0 → eye_cv-1.0.1}/requirements.txt +0 -0
  113. {eye_cv-1.0.0 → eye_cv-1.0.1}/setup.cfg +0 -0
@@ -305,14 +305,14 @@ Eye v2.0 is perfect for:
 
  ## 📈 Comparison
 
- | Feature | Eye v2.0 | Eye v1.0 | Supervision | Ultralytics |
- |---------|----------|----------|-------------|-------------|
- | Modern annotators | ✅ 4 new | ❌ | ❌ | ❌ |
- | Auto-conversion | ✅ 11+ | ❌ | ❌ | ⚠️ YOLO only |
- | Easy smoothing | ✅ 5 presets | ⚠️ Manual | ❌ | ❌ |
- | Web-ready | ✅ Built-in | ❌ | ❌ | ❌ |
- | Zero CVEs | ✅ Audited | ✅ | ⚠️ | ⚠️ |
- | Dependencies | 3 core | 3 core | 10+ | 15+ |
+ | Feature | Eye v2.0 | Eye v1.0 |
+ |---------|----------|----------|
+ | Modern annotators | ✅ 4 new | ❌ |
+ | Auto-conversion | ✅ 11+ | ❌ |
+ | Easy smoothing | ✅ 5 presets | ⚠️ Manual |
+ | Web-ready | ✅ Built-in | ❌ |
+ | Zero CVEs | ✅ Audited | ✅ |
+ | Dependencies | 3 core | 3 core |
 
  ---
 
@@ -255,18 +255,18 @@ tracker = eye.Tracker(
  - **example.py**: Full working example
  - **test_api.py**: API tests demonstrating all features
 
- ## 🎉 Benefits Over Supervision
-
- | Feature | Supervision | Eye |
- |---------|-------------|-----|
- | Model conversion | Manual (3-5 lines) | Auto (1 line) |
- | Tracker setup | Manual tuning | Get recommendation |
- | Jitter reduction | Manual implementation | Built-in |
- | Color API | Complex (`ColorPalette`) | Simple (`Colors.RED`) |
- | One-liners | No | Yes (`detect`, `track`, `annotate`) |
- | Learning curve | Steep | Gentle |
- | Box inflation | Manual | Automatic |
- | Smoothing | Not included | Built-in (Kalman + Exponential) |
+ ## 🎉 Key Benefits
+
+ | Feature | Eye |
+ |---------|-----|
+ | Model conversion | Auto (1 line) |
+ | Tracker setup | Get recommendation |
+ | Jitter reduction | Built-in |
+ | Color API | Simple (`Colors.RED`) |
+ | One-liners | Yes (`detect`, `track`, `annotate`) |
+ | Learning curve | Gentle |
+ | Box inflation | Automatic |
+ | Smoothing | Built-in (Kalman + Exponential) |
 
  ## 🏁 Ready to Use
 
@@ -0,0 +1,619 @@
+ # eye-cv 1.0.0 - Package Features & Capabilities
+
+ Complete guide to all functionalities available in `pip install eye-cv==1.0.0`
+
+ ## Installation
+
+ ```bash
+ # Minimal install (core features only)
+ pip install eye-cv==1.0.0
+
+ # Recommended: all optional features
+ pip install "eye-cv[all]==1.0.0"
+
+ # Or pick specific extras:
+ pip install "eye-cv[track]==1.0.0" # Advanced tracking (ByteTrack/BoT-SORT)
+ pip install "eye-cv[smooth]==1.0.0" # Kalman smoothing
+ pip install "eye-cv[web]==1.0.0" # Flask + FastAPI helpers
+ ```
+
+ ## Core Dependencies (Always Installed)
+
+ - `opencv-python >= 4.8.0` - Computer vision operations
+ - `numpy >= 1.24.0` - Array operations
+ - `matplotlib >= 3.7.0` - Color palettes and visualization
+ - `tqdm >= 4.66.0` - Progress bars
+
+ ## Optional Dependencies
+
+ | Extra | Package | Purpose |
+ |-------|---------|---------|
+ | `[smooth]` | `filterpy >= 1.4.5` | Kalman filter-based smoothing |
+ | `[track]` | `lap >= 0.4.0` | Linear assignment for ByteTrack/BoT-SORT |
+ | `[web]` | `flask >= 2.3.0`, `flask-cors >= 4.0.0` | Flask API helpers |
+ | `[web]` | `fastapi >= 0.104.0`, `uvicorn >= 0.24.0` | FastAPI server helpers |
+
+ ---
+
+ ## 1. Universal Model Format Conversion
+
+ **Auto-detect and convert detection results from 11+ model formats without manual box extraction.**
+
+ ### Supported Formats
+
+ - ✅ YOLO (Ultralytics)
+ - ✅ PyTorch (torchvision, custom models)
+ - ✅ TensorFlow / Keras
+ - ✅ OpenCV DNN
+ - ✅ ONNX / TensorRT
+ - ✅ MMDetection
+ - ✅ Detectron2
+ - ✅ PaddlePaddle
+ - ✅ OpenVINO
+ - ✅ Raw NumPy arrays
+
+ ### API
+
+ ```python
+ import eye
+
+ # Automatic detection and conversion
+ detections = eye.auto_convert(model_output)
+
+ # Or use specific converters
+ detections = eye.from_yolo(results)
+ detections = eye.from_pytorch(outputs)
+ detections = eye.from_tensorflow(boxes, scores, classes)
+ detections = eye.from_opencv(detections)
+ ```
+
+ ---
+
+ ## 2. Detections - Immutable Data Structure
+
+ **Thread-safe, immutable detection container with automatic caching.**
+
+ ### Core Properties
+
+ ```python
+ detections.xyxy # Bounding boxes [x1, y1, x2, y2]
+ detections.confidence # Detection scores
+ detections.class_id # Class labels
+ detections.tracker_id # Tracking IDs (after tracking)
+ detections.mask # Segmentation masks (if available)
+ detections.data # Custom metadata dict
+ ```
+
+ ### Cached Computed Properties
+
+ ```python
+ detections.area # Box areas (cached)
+ detections.center # Box centers (cached)
+ detections.aspect_ratio # Width/height ratios (cached)
+ ```
+
+ ### Immutable Operations
+
+ ```python
+ # Filtering (returns new instance)
+ filtered = detections.filter(detections.confidence > 0.5)
+ filtered = detections[detections.class_id == 0]
+
+ # Updating (returns new instance)
+ updated = detections.with_confidence(new_scores)
+ updated = detections.with_tracker_id(track_ids)
+
+ # Slicing
+ first_10 = detections[:10]
+ ```
+
+ ---
+
+ ## 3. Multi-Algorithm Object Tracking
+
+ **3 tracking algorithms bundled with smart recommendations.**
+
+ ### Available Trackers
+
+ | Tracker | Speed | Accuracy | Use Case |
+ |---------|-------|----------|----------|
+ | **SORT** | ⚡⚡⚡ Fast | Good | Clear scenes, real-time |
+ | **ByteTrack** | ⚡⚡ Medium | Better | Crowded/occluded scenes |
+ | **BoT-SORT** | ⚡ Slower | Best | Maximum accuracy needed |
+
+ ### Basic Usage
+
+ ```python
+ import eye
+
+ # Create tracker with box inflation (Eye's innovation)
+ tracker = eye.Tracker(
+     tracker_type=eye.TrackerType.BYTETRACK,
+     inflation_factor=2.0 # Inflates boxes 2x for matching
+ )
+
+ # Track detections
+ detections = eye.auto_convert(model(frame))
+ tracked = tracker.update(detections)
+
+ # Access tracking IDs
+ for box, track_id in zip(tracked.xyxy, tracked.tracker_id):
+     print(f"Object {track_id}: {box}")
+ ```
+
+ ### Smart Recommendations
+
+ ```python
+ # Get recommended settings for your use case
+ rec = eye.get_tracker_recommendation(eye.UseCase.TRAFFIC_DENSE)
+
+ tracker = eye.Tracker(
+     tracker_type=rec['tracker_type'],
+     inflation_factor=rec['inflation_factor'],
+     **rec['config']
+ )
+ ```
+
+ **Available Use Cases:**
+ - `TRAFFIC_CLEAR` - Highway, clear visibility
+ - `TRAFFIC_DENSE` - Urban traffic, frequent occlusions
+ - `PEDESTRIANS` - Crowded sidewalks, crosswalks
+ - `SPORTS` - Fast movement, camera motion
+ - `SURVEILLANCE` - Security cameras, slow-moving objects
+ - `WAREHOUSE` - Indoor logistics, predictable paths
+
+ ---
+
+ ## 4. Jitter Reduction & Smoothing
+
+ **Built-in smoothing to stabilize bounding boxes.**
+
+ ### Exponential Smoothing (Always Available)
+
+ ```python
+ tracker = eye.Tracker(
+     enable_smoothing=True, # Default
+     smoothing_alpha=0.3 # 0.1 = very smooth, 0.5 = fast response
+ )
+ ```
+
+ ### Kalman Filter Smoothing (Requires `[smooth]` extra)
+
+ ```python
+ from eye import KalmanSmoothing
+
+ smoother = KalmanSmoothing(process_noise=0.05, measurement_noise=0.1)
+ smoothed_box = smoother.update(track_id, box)
+ ```
+
+ ### Smoothing Presets
+
+ ```python
+ from eye import SmoothingConfig, SmoothingPreset
+
+ # Easy presets
+ config = SmoothingConfig.from_preset(SmoothingPreset.MEDIUM)
+
+ tracker = eye.Tracker(
+     enable_smoothing=True,
+     smoothing_alpha=config.alpha
+ )
+ ```
+
+ **Presets:** `NONE`, `LIGHT`, `MEDIUM`, `HEAVY`, `ULTRA`
+
+ ---
+
+ ## 5. Professional Annotators
+
+ **Draw detection results on frames with modern styling.**
+
+ ### Core Annotators (Always Available)
+
+ ```python
+ import eye
+
+ # Bounding boxes with rounded corners
+ box_annotator = eye.BoxAnnotator(
+     thickness=2,
+     color=eye.ColorPalette.default()
+ )
+
+ # Labels with shadows
+ label_annotator = eye.LabelAnnotator(
+     text_scale=0.5,
+     text_thickness=1
+ )
+
+ # Fading motion trails
+ trace_annotator = eye.TraceAnnotator(
+     trace_length=30,
+     thickness=2
+ )
+
+ # Heatmaps
+ heatmap_annotator = eye.HeatMapAnnotator()
+
+ # Annotate frame
+ annotated = box_annotator.annotate(frame, detections)
+ annotated = label_annotator.annotate(annotated, detections, labels)
+ ```
+
+ ### Modern Annotators (v2.0 Innovations)
+
+ ```python
+ from eye import (
+     GradientBoxAnnotator,
+     NeonTraceAnnotator,
+     ShadowBoxAnnotator,
+     FPSAnnotator,
+     InfoAnnotator
+ )
+
+ # Gradient-filled boxes
+ gradient = GradientBoxAnnotator()
+
+ # Glowing neon trails
+ neon = NeonTraceAnnotator(glow_intensity=2.0)
+
+ # Boxes with drop shadows
+ shadow = ShadowBoxAnnotator(shadow_offset=5)
+
+ # FPS counter
+ fps = FPSAnnotator()
+
+ # Info panel overlay
+ info = InfoAnnotator()
+ ```
+
+ ---
+
+ ## 6. Zone Management & Analytics
+
+ **Define zones and count objects crossing boundaries.**
+
+ ```python
+ import eye
+ import numpy as np
+
+ # Define zone polygon
+ zone = eye.PolygonZone(
+     polygon=np.array([[100, 300], [200, 300], [200, 400], [100, 400]]),
+     triggering_anchors=[eye.Position.CENTER]
+ )
+
+ # Filter detections in zone
+ in_zone = zone.trigger(detections)
+
+ # Get counts
+ print(f"Current: {zone.current_count}, Total: {len(in_zone)}")
+
+ # Visualize zone
+ zone_annotator = eye.PolygonZoneAnnotator(
+     zone=zone,
+     color=eye.Color.from_hex("#FF0000"),
+     thickness=2
+ )
+ annotated = zone_annotator.annotate(frame)
+ ```
+
+ ### Line Zone (Virtual Trip Wire)
+
+ ```python
+ line_zone = eye.LineZone(
+     start=eye.Point(100, 300),
+     end=eye.Point(500, 300)
+ )
+
+ crossed = line_zone.trigger(detections)
+ ```
+
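The snippet above fires once per call; over a video the line is typically evaluated every frame. Below is a minimal sketch of that loop using only the calls documented in this file, and assuming `trigger` returns the detections that crossed on that update (the return type is not spelled out here):

```python
import eye
from ultralytics import YOLO  # detection model, as in the examples above

model = YOLO("yolo11n.pt")
tracker = eye.Tracker()

line_zone = eye.LineZone(
    start=eye.Point(100, 300),
    end=eye.Point(500, 300)
)

total_crossings = 0
for frame in eye.get_video_frames_generator("input.mp4"):
    detections = eye.auto_convert(model(frame))
    tracked = tracker.update(detections)
    crossed = line_zone.trigger(tracked)  # assumed: detections that crossed on this frame
    total_crossings += len(crossed)

print(f"Total crossings: {total_crossings}")
```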
+ ---
+
+ ## 7. Video Processing
+
+ **Efficient video reading/writing with progress tracking.**
+
+ ```python
+ import eye
+
+ # Read video info
+ video_info = eye.VideoInfo.from_video_path("input.mp4")
+ print(f"FPS: {video_info.fps}, Frames: {video_info.total_frames}")
+
+ # Process video with progress bar
+ def process_frame(frame, frame_idx):
+     # Detection
+     results = model(frame)
+     detections = eye.auto_convert(results)
+
+     # Tracking
+     detections = tracker.update(detections)
+
+     # Annotation
+     annotated = box_annotator.annotate(frame, detections)
+     return annotated
+
+ eye.process_video(
+     source_path="input.mp4",
+     target_path="output.mp4",
+     callback=process_frame
+ )
+
+ # Or use frame generator for custom processing
+ for frame in eye.get_video_frames_generator("input.mp4"):
+     processed = process_frame(frame, 0)
+ ```
+
+ ### Video Writer
+
+ ```python
+ writer = eye.VideoWriter(
+     target_path="output.mp4",
+     video_info=video_info
+ )
+
+ for frame in frames:
+     writer.write_frame(frame)
+
+ writer.close()
+ ```
+
+ ---
+
+ ## 8. Color Palettes
+
+ **Pre-defined and custom color schemes.**
+
+ ```python
+ from eye import Color, ColorPalette
+
+ # Use built-in palette
+ palette = ColorPalette.default()
+
+ # Create custom palette
+ custom = ColorPalette([
+     Color(255, 0, 0), # Red
+     Color.from_hex("#00FF00"), # Green
+     Color(0, 0, 255) # Blue
+ ])
+
+ # Get color by class ID
+ color = palette.by_idx(class_id)
+ ```
+
+ ---
+
+ ## 9. Web API Support (Requires `[web]` extra)
+
+ **Flask and FastAPI helpers for REST APIs.**
+
+ ### Flask (5-line setup)
+
+ ```python
+ from ultralytics import YOLO
+ import eye
+
+ model = YOLO("yolo11n.pt")
+ app = eye.create_flask_app(model, class_names=model.names)
+ app.run(host='0.0.0.0', port=5000)
+ ```
+
+ **API Endpoint:** `POST /detect` with image upload
+
+ **Response:**
+ ```json
+ {
+   "success": true,
+   "detections": [
+     {
+       "bbox": [x1, y1, x2, y2],
+       "confidence": 0.95,
+       "class_id": 0,
+       "class_name": "person"
+     }
+   ],
+   "processing_time_ms": 15.3
+ }
+ ```
+
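A client for the `/detect` endpoint shown above can be as small as the following sketch; the multipart field name `image` and the use of `requests` are assumptions, since the package only documents the endpoint and response shape:

```python
import requests  # not an eye-cv dependency; install separately

# Hypothetical client for the POST /detect endpoint started above.
# The multipart field name "image" is an assumption.
with open("frame.jpg", "rb") as image_file:
    response = requests.post(
        "http://localhost:5000/detect",
        files={"image": image_file},
    )

payload = response.json()
if payload.get("success"):
    for det in payload["detections"]:
        print(det["class_name"], det["confidence"], det["bbox"])
```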
+ ### FastAPI
+
+ ```python
+ app = eye.create_fastapi_app(model, class_names=model.names)
+
+ # Run with: uvicorn main:app --reload
+ ```
+
+ ---
+
+ ## 10. Utility Functions
+
+ ### Geometry Operations
+
+ ```python
+ from eye.detection.utils import (
+     box_iou_batch, # IoU between box sets
+     clip_boxes, # Clip boxes to image bounds
+     scale_boxes, # Scale boxes by factor
+     move_boxes, # Translate boxes
+     xywh_to_xyxy, # Convert box formats
+     polygon_to_mask # Rasterize polygon
+ )
+ ```
+
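The package documents only the helper names above, so the following usage sketch assumes the obvious signatures (an IoU matrix from two box arrays, and clipping to a width/height pair); treat it as illustrative rather than authoritative:

```python
import numpy as np
from eye.detection.utils import box_iou_batch, clip_boxes

boxes_a = np.array([[10, 10, 50, 50], [60, 60, 120, 120]], dtype=float)
boxes_b = np.array([[12, 12, 48, 48]], dtype=float)

# Assumed: pairwise IoU matrix of shape (len(boxes_a), len(boxes_b))
iou = box_iou_batch(boxes_a, boxes_b)

# Assumed: clip xyxy boxes to a (width, height) resolution
clipped = clip_boxes(boxes_a, (100, 100))
```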
+ ### Image Operations
+
+ ```python
+ from eye.utils.image import (
+     crop_image, # Crop to bounding box
+     resize_image, # Resize with aspect ratio
+     letterbox_image, # Pad to square
+     overlay_image # Alpha blend images
+ )
+ ```
+
+ ### Non-Maximum Suppression
+
+ ```python
+ from eye.detection.overlap_filter import (
+     box_non_max_suppression,
+     mask_non_max_suppression
+ )
+
+ # Remove overlapping detections
+ filtered = box_non_max_suppression(detections, iou_threshold=0.5)
+ ```
+
+ ---
+
+ ## Quick Start Examples
+
+ ### 1. Basic Detection + Tracking
+
+ ```python
+ import eye
+ from ultralytics import YOLO
+ import cv2
+
+ model = YOLO("yolo11n.pt")
+ tracker = eye.Tracker(tracker_type=eye.TrackerType.SORT)
+ box_annotator = eye.BoxAnnotator()
+
+ cap = cv2.VideoCapture("video.mp4")
+ while True:
+     ret, frame = cap.read()
+     if not ret:
+         break
+
+     # Detect and track
+     results = model(frame)[0]
+     detections = eye.from_yolo(results)
+     tracked = tracker.update(detections)
+
+     # Annotate
+     annotated = box_annotator.annotate(frame, tracked)
+     cv2.imshow("Tracking", annotated)
+
+     if cv2.waitKey(1) & 0xFF == ord('q'):
+         break
+
+ cap.release()
+ cv2.destroyAllWindows()
+ ```
+
+ ### 2. Traffic Monitoring with Zone Counting
+
+ ```python
+ import eye
+ import numpy as np
+
+ # Setup
+ tracker = eye.Tracker(
+     tracker_type=eye.TrackerType.BYTETRACK,
+     inflation_factor=2.0
+ )
+
+ zone = eye.PolygonZone(
+     polygon=np.array([[400, 300], [800, 300], [800, 500], [400, 500]])
+ )
+
+ # Process video
+ def callback(frame, idx):
+     detections = eye.auto_convert(model(frame))
+     tracked = tracker.update(detections)
+
+     # Count in zone
+     in_zone = zone.trigger(tracked)
+
+     # Annotate
+     annotated = box_annotator.annotate(frame, tracked)
+     annotated = zone_annotator.annotate(annotated)
+
+     return annotated
+
+ eye.process_video("traffic.mp4", "output.mp4", callback)
+ ```
+
+ ### 3. Multi-Class Detection with Filtering
+
+ ```python
+ import eye
+
+ detections = eye.auto_convert(model(frame))
+
+ # Filter by confidence
+ high_conf = detections.filter(detections.confidence > 0.7)
+
+ # Filter by class (e.g., only people and cars)
+ people_cars = high_conf[
+     (high_conf.class_id == 0) | (high_conf.class_id == 2)
+ ]
+
+ # Track and annotate
+ tracked = tracker.update(people_cars)
+ annotated = box_annotator.annotate(frame, tracked)
+ ```
+
+ ---
+
+ ## Performance Tips
+
+ 1. **Use box inflation** (1.5-2.5x) for better tracking in crowded scenes
+ 2. **Enable smoothing** for stable bounding boxes
+ 3. **Batch operations** when possible (detections are vectorized)
+ 4. **Filter early** to reduce tracking overhead
+ 5. **Use appropriate tracker** - SORT for speed, ByteTrack for crowded scenes
+
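As a minimal sketch of how tips 1, 2, 4, and 5 combine in practice, using only parameters documented earlier in this file:

```python
import eye
from ultralytics import YOLO

model = YOLO("yolo11n.pt")

# Tip 5: ByteTrack for crowded scenes; Tip 1: box inflation; Tip 2: smoothing
tracker = eye.Tracker(
    tracker_type=eye.TrackerType.BYTETRACK,
    inflation_factor=2.0,
    enable_smoothing=True,
    smoothing_alpha=0.3
)

def process(frame, idx):
    detections = eye.auto_convert(model(frame))
    # Tip 4: filter early, before tracking, to reduce tracker workload
    detections = detections.filter(detections.confidence > 0.5)
    return tracker.update(detections)
```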
568
+ ---
569
+
570
+ ## Type Hints & IDE Support
571
+
572
+ Eye includes full type annotations (`py.typed` marker):
573
+
574
+ ```python
575
+ import eye
576
+
577
+ # All functions and classes have proper type hints
578
+ detections: eye.Detections = eye.auto_convert(results)
579
+ tracker: eye.Tracker = eye.Tracker()
580
+ ```
581
+
582
+ ---
583
+
584
+ ## Thread Safety
585
+
586
+ - ✅ `Detections` class is fully immutable and thread-safe
587
+ - ✅ Annotators are stateless and thread-safe
588
+ - ⚠️ `Tracker` maintains state - use one per thread/video
589
+ - ⚠️ Smoothers maintain state - use one per tracked object
590
+
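A rough sketch of the last two points: each video stream gets its own stateful `Tracker`, while the stateless annotator is shared. Whether the detection model itself can be shared across threads depends on the model framework, not on eye-cv, so this sketch loads one model per stream:

```python
from concurrent.futures import ThreadPoolExecutor

import eye
from ultralytics import YOLO

box_annotator = eye.BoxAnnotator()  # stateless: safe to share across threads

def process_stream(path):
    model = YOLO("yolo11n.pt")  # one model per thread keeps things simple
    tracker = eye.Tracker()     # stateful: one instance per video/thread
    for frame in eye.get_video_frames_generator(path):
        detections = eye.auto_convert(model(frame))
        tracked = tracker.update(detections)
        box_annotator.annotate(frame, tracked)

with ThreadPoolExecutor(max_workers=2) as pool:
    list(pool.map(process_stream, ["cam1.mp4", "cam2.mp4"]))
```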
+ ---
+
+ ## Security Audit
+
+ ✅ **Zero CVEs** (audited 2024 Q4)
+ ✅ **A+ Rating** - All dependencies actively maintained
+ ✅ **No deprecated packages**
+
+ ---
+
+ ## License
+
+ MIT License - Free for commercial use
+
+ ---
+
+ ## Support & Documentation
+
+ - 📦 PyPI: https://pypi.org/project/eye-cv/
+ - 🐙 GitHub: https://github.com/danssou/eye
+ - 📚 Examples: See `examples/` directory in repo (20+ complete examples)
+ - 🎯 Tracker Guide: See `TRACKER_GUIDE.md` for decision tree
+ - 🔧 Usage Patterns: See `USAGE.md` for common workflows
+
+ ---
+
+ **Version:** 1.0.0
+ **Python:** >=3.8
+ **Status:** Production-Ready
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: eye-cv
- Version: 1.0.0
+ Version: 1.0.1
  Summary: Eye - Simple & Powerful Computer Vision. Auto-convert, smart tracking, jitter reduction.
  Author: Dakar Project
  Classifier: Development Status :: 4 - Beta
@@ -63,7 +63,7 @@ Dynamic: summary
 
  **Professional Computer Vision Toolkit for Commercial Projects**
 
- A comprehensive, production-ready library for object detection, tracking, and annotation - built as a reusable alternative to Supervision with enhanced features and innovations.
+ A comprehensive, production-ready library for object detection, tracking, and annotation with enhanced features and innovations.
 
  ## 🚀 Features
 
@@ -182,7 +182,7 @@ def process_frame(frame, frame_idx):
  video.process(process_frame)
  ```
 
- ## 🎯 Innovations Over Supervision
+ ## 🎯 Key Innovations
 
  1. **Immutable Operations**: Thread-safe, prevents bugs
  2. **Automatic Caching**: Faster repeated computations
@@ -2,7 +2,7 @@
 
  **Professional Computer Vision Toolkit for Commercial Projects**
 
- A comprehensive, production-ready library for object detection, tracking, and annotation - built as a reusable alternative to Supervision with enhanced features and innovations.
+ A comprehensive, production-ready library for object detection, tracking, and annotation with enhanced features and innovations.
 
  ## 🚀 Features
 
@@ -121,7 +121,7 @@ def process_frame(frame, frame_idx):
  video.process(process_frame)
  ```
 
- ## 🎯 Innovations Over Supervision
+ ## 🎯 Key Innovations
 
  1. **Immutable Operations**: Thread-safe, prevents bugs
  2. **Automatic Caching**: Faster repeated computations
@@ -236,16 +236,16 @@ video.process(process)
  - **Smoothing**: α=0.3 (balanced) to 0.1 (very smooth)
  - **Tracker**: SORT (fast) → ByteTrack (robust) → BoT-SORT (advanced)
 
- ## Comparison vs Supervision
-
- | Feature | Supervision | Eye |
- |---------|-------------|-----|
- | Model conversion | Manual | Auto |
- | Tracker setup | Manual config | Get recommendation |
- | Jitter reduction | Manual | Built-in |
- | Color API | Complex | Simple (Colors.RED) |
- | One-liners | No | Yes (detect/track/annotate) |
- | Learning curve | Steep | Gentle |
+ ## Key Features
+
+ | Feature | Eye |
+ |---------|-----|
+ | Model conversion | Auto |
+ | Tracker setup | Get recommendation |
+ | Jitter reduction | Built-in |
+ | Color API | Simple (Colors.RED) |
+ | One-liners | Yes (detect/track/annotate) |
+ | Learning curve | Gentle |
 
  ---
 
@@ -1,4 +1,4 @@
- """Line zone counting and annotator (supervision-like LineZone).
+ """Line zone counting and annotator.
 
  Minimal implementation: track per-tracker last side of the line and detect crossings.
  """
@@ -1,4 +1,4 @@
- """Universal auto-conversion for ALL model formats - Works with supervision!"""
+ """Universal auto-conversion for ALL model formats."""
 
  import numpy as np
  from typing import Any, Optional, Union
@@ -29,7 +29,7 @@ class Color:
          int(hex_color[4:6], 16)
      )
 
- # Predefined colors for supervision compatibility
+ # Predefined color constants
  Color.ROBOFLOW = Color(255, 107, 0)
  Color.WHITE = Color(255, 255, 255)
  Color.BLACK = Color(0, 0, 0)
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: eye-cv
- Version: 1.0.0
+ Version: 1.0.1
  Summary: Eye - Simple & Powerful Computer Vision. Auto-convert, smart tracking, jitter reduction.
  Author: Dakar Project
  Classifier: Development Status :: 4 - Beta
@@ -63,7 +63,7 @@ Dynamic: summary
 
  **Professional Computer Vision Toolkit for Commercial Projects**
 
- A comprehensive, production-ready library for object detection, tracking, and annotation - built as a reusable alternative to Supervision with enhanced features and innovations.
+ A comprehensive, production-ready library for object detection, tracking, and annotation with enhanced features and innovations.
 
  ## 🚀 Features
 
@@ -182,7 +182,7 @@ def process_frame(frame, frame_idx):
  video.process(process_frame)
  ```
 
- ## 🎯 Innovations Over Supervision
+ ## 🎯 Key Innovations
 
  1. **Immutable Operations**: Thread-safe, prevents bugs
  2. **Automatic Caching**: Faster repeated computations
@@ -4,6 +4,7 @@ IMPROVEMENTS.md
  INSTALL_INTEGRATION.md
  LICENSE
  MANIFEST.in
+ PACKAGE_FEATURES.md
  PUBLISHING.md
  QUICK_REFERENCE.md
  README.md
@@ -5,7 +5,7 @@ with open("README.md", "r", encoding="utf-8") as fh:
 
  setup(
      name="eye-cv",
-     version="1.0.0",
+     version="1.0.1",
      author="Dakar Project",
      description="Eye - Simple & Powerful Computer Vision. Auto-convert, smart tracking, jitter reduction.",
      long_description=long_description,
@@ -1,4 +1,4 @@
- """Smoke tests for Eye (supervision-style) package."""
+ """Smoke tests for Eye package."""
 
  import os
  import sys