pyw-vision 0.0.0__tar.gz → 0.0.0.post2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,507 @@
1
+ Metadata-Version: 2.4
2
+ Name: pyw-vision
3
+ Version: 0.0.0.post2
4
+ Summary: Reserved placeholder for pyw-vision (vision utilities)
5
+ Project-URL: Homepage, https://github.com/pythonWoods/pyw-vision
6
+ Project-URL: Documentation, https://pythonwoods.dev/docs/pyw-vision/latest/
7
+ Project-URL: Issues, https://github.com/pythonWoods/pyw-vision/issues
8
+ Project-URL: Changelog, https://github.com/pythonWoods/pyw-vision/releases
9
+ Author: pythonWoods
10
+ License: MIT
11
+ License-File: LICENSE
12
+ Classifier: Development Status :: 2 - Pre-Alpha
13
+ Classifier: License :: OSI Approved :: MIT License
14
+ Classifier: Programming Language :: Python :: 3 :: Only
15
+ Classifier: Programming Language :: Python :: 3.11
16
+ Classifier: Typing :: Typed
17
+ Requires-Python: >=3.9
18
+ Requires-Dist: pyw-core>=0.0.0
19
+ Description-Content-Type: text/markdown
20
+
21
+ # pyw-vision 👁️
22
+ [![PyPI](https://img.shields.io/pypi/v/pyw-vision.svg)](https://pypi.org/project/pyw-vision/)
23
+ [![CI](https://github.com/pythonWoods/pyw-vision/actions/workflows/ci.yml/badge.svg)](https://github.com/pythonWoods/pyw-vision/actions/workflows/ci.yml)
24
+ [![License](https://img.shields.io/badge/License-MIT-yellow.svg)](LICENSE)
25
+
26
+ > Computer vision utilities & helpers for the **pythonWoods** ecosystem.
27
+
28
+ ## Overview
29
+
30
+ **pyw-vision** provides computer vision utilities and helpers with a focus on ease of use and performance. As part of the pythonWoods ecosystem, it integrates seamlessly with the other modules to build complete image-processing and visual-analysis solutions.
31
+
32
+ ## Features
33
+
34
+ ### 🖼️ Image Processing
35
+ - **Resize & Transform**: Smart resizing with aspect-ratio preservation
36
+ - **Filters & Effects**: Blur, sharpen, brightness, contrast
37
+ - **Format Conversion**: Multi-format support (JPEG, PNG, WebP, TIFF)
38
+ - **Batch Processing**: Efficient processing of large batches of images
39
+
40
+ ### 🎯 Object Detection
41
+ - **YOLO Integration**: YOLOv5/v8 support with model caching
42
+ - **Custom Models**: Load your own custom models
43
+ - **Bounding Boxes**: Utilities for bbox handling and rendering
44
+ - **Confidence Filtering**: Automatic filtering on detection quality
45
+
46
+ ### 🔍 Feature Extraction
47
+ - **Keypoint Detection**: SIFT, ORB, Harris corners
48
+ - **Descriptors**: Feature matching and similarity
49
+ - **Template Matching**: Pattern search in images
50
+ - **Contour Analysis**: Shape detection and analysis
51
+
52
+ ### 📊 Analysis Tools
53
+ - **Image Metrics**: Histograms, statistics, quality metrics
54
+ - **Comparison**: SSIM, MSE, perceptual difference
55
+ - **Color Analysis**: Palette extraction, dominant colors (see the sketch below)
56
+ - **Geometric**: Perspective correction, distortion removal
57
+
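+ A minimal sketch of how palette extraction could look; the `color` module and the `extract_palette` helper are illustrative assumptions, not a published API:
+
+ ```python
+ # Hypothetical palette extraction (module and function names are assumptions)
+ from pyw.vision import color
+
+ palette = color.extract_palette("photo.jpg", n_colors=5)
+ for swatch in palette:
+     print(swatch.hex, swatch.ratio)  # dominant colors and their coverage
+ ```
+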
58
+ ## Installation
59
+
60
+ ```bash
61
+ # Base installation
62
+ pip install pyw-vision
63
+
64
+ # With deep-learning support (YOLOv8, PyTorch)
65
+ pip install pyw-vision[ml]
66
+
67
+ # Full computer vision stack (includes motion detection)
68
+ pip install pyw-cv # Bundle: pyw-vision + pyw-motion
69
+ ```
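+
+ After installing the `ml` extra, a quick sanity check for the optional deep-learning stack (assuming the extra pulls in PyTorch, as noted above) might look like:
+
+ ```python
+ import importlib.util
+
+ # True if the optional deep-learning dependency (PyTorch) is importable
+ HAS_ML = importlib.util.find_spec("torch") is not None
+ print("ml extra available:", HAS_ML)
+ ```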
70
+
71
+ ## Quick Start
72
+
73
+ ### Basic Image Processing
74
+
75
+ ```python
76
+ from pyw.vision import Image, ImageProcessor
77
+
78
+ # Load and process an image
79
+ img = Image.from_file("photo.jpg")
80
+
81
+ # Chain processing
82
+ processed = (img
83
+ .resize(width=800, keep_aspect=True)
84
+ .enhance_contrast(1.2)
85
+ .apply_blur(radius=2)
86
+ .convert_format("webp")
87
+ )
88
+
89
+ processed.save("output.webp", quality=85)
90
+ ```
91
+
92
+ ### Object Detection
93
+
94
+ ```python
95
+ from pyw.vision import ObjectDetector
96
+
97
+ # Set up a detector with automatic model caching
98
+ detector = ObjectDetector.yolo_v8(model_size="medium")
99
+
100
+ # Run detection on a single image
101
+ results = detector.detect("image.jpg", confidence=0.5)
102
+
103
+ for detection in results:
104
+ print(f"Object: {detection.class_name} ({detection.confidence:.2f})")
105
+ print(f"Bbox: {detection.bbox}")
106
+
107
+ # Batch detection
108
+ batch_results = detector.detect_batch([
109
+ "img1.jpg", "img2.jpg", "img3.jpg"
110
+ ], max_workers=4)
111
+ ```
112
+
113
+ ### Feature Matching
114
+
115
+ ```python
116
+ from pyw.vision import FeatureExtractor, FeatureMatcher
117
+
118
+ # Extract features from two images
119
+ extractor = FeatureExtractor.sift(n_features=1000)
120
+ features1 = extractor.extract("template.jpg")
121
+ features2 = extractor.extract("scene.jpg")
122
+
123
+ # Match features
124
+ matcher = FeatureMatcher(algorithm="flann")
125
+ matches = matcher.match(features1, features2)
126
+
127
+ # Find the homography
128
+ homography = matcher.find_homography(matches, min_matches=10)
129
+ if homography is not None:
130
+ print("Template found in scene!")
131
+ ```
132
+
133
+ ## Advanced Usage
134
+
135
+ ### Custom Image Pipeline
136
+
137
+ ```python
138
+ from pyw.vision import ImagePipeline, filters
139
+
140
+ # Define a custom pipeline
141
+ pipeline = ImagePipeline([
142
+ filters.NormalizeLighting(),
143
+ filters.RemoveNoise(method="bilateral"),
144
+ filters.EnhanceSharpness(factor=1.5),
145
+ filters.ColorBalance(auto=True)
146
+ ])
147
+
148
+ # Apply to a single image
149
+ result = pipeline.process("noisy_image.jpg")
150
+
151
+ # Batch processing with a progress bar
152
+ results = pipeline.process_batch(
153
+ ["img1.jpg", "img2.jpg", "img3.jpg"],
154
+ output_dir="processed/",
155
+ show_progress=True
156
+ )
157
+ ```
158
+
159
+ ### Smart Cropping
160
+
161
+ ```python
162
+ from pyw.vision import SmartCropper
163
+
164
+ cropper = SmartCropper(
165
+ target_ratio=(16, 9),
166
+ focus_detection=True # Use face/object detection
167
+ )
168
+
169
+ # Smart crop that keeps the important subjects
170
+ cropped = cropper.crop("portrait.jpg")
171
+ cropped.save("cropped_16x9.jpg")
172
+
173
+ # Multiple crops for social media
174
+ variants = cropper.crop_variants("image.jpg", formats=[
175
+ ("instagram_post", 1080, 1080),
176
+ ("instagram_story", 1080, 1920),
177
+ ("facebook_cover", 1200, 630)
178
+ ])
179
+ ```
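+
+ Assuming `crop_variants` returns a mapping from format name to cropped image (the exact return type is not documented here), the variants could be saved like this:
+
+ ```python
+ # Hypothetical follow-up: persist each named variant produced above
+ for name, image in variants.items():
+     image.save(f"social/{name}.jpg", quality=90)
+ ```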
180
+
181
+ ### Real-time Processing
182
+
183
+ ```python
184
+ from pyw.vision import VideoProcessor, ObjectDetector
185
+
+ # Detector reused inside the frame handler below
+ detector = ObjectDetector.yolo_v8(model_size="small")
186
+
187
+ # Setup video processor
188
+ processor = VideoProcessor(
189
+ input_source=0, # Webcam
190
+ fps_limit=30
191
+ )
192
+
193
+ @processor.frame_handler
194
+ def process_frame(frame):
195
+ # Run detection in real time
196
+ detections = detector.detect(frame, confidence=0.6)
197
+
198
+ # Draw bounding boxes
199
+ for det in detections:
200
+ frame = det.draw_on(frame, color="red", thickness=2)
201
+
202
+ return frame
203
+
204
+ # Start processing
205
+ processor.start()
206
+ ```
207
+
208
+ ### Integration with pyw-fs
209
+
210
+ ```python
211
+ from pyw.vision import Image
212
+ from pyw.fs import FileSystem
213
+
214
+ # Use the unified filesystem (local/S3/GCS)
215
+ fs = FileSystem.from_url("s3://my-bucket/images/")
216
+
217
+ # Process remote images
218
+ for image_path in fs.glob("*.jpg"):
219
+ img = Image.from_fs(fs, image_path)
220
+
221
+ # Generate a thumbnail
222
+ thumb = img.resize(width=200, keep_aspect=True)
223
+
224
+ # Save the thumbnail
225
+ thumb_path = image_path.replace(".jpg", "_thumb.jpg")
226
+ thumb.save_to_fs(fs, thumb_path)
227
+ ```
228
+
229
+ ## Configuration
230
+
231
+ ```python
232
+ from pyw.vision import VisionConfig
233
+ from pyw.core import BaseConfig
234
+
235
+ class MyVisionConfig(BaseConfig):
236
+ # Model paths and caching
237
+ model_cache_dir: str = "~/.pyw/vision/models"
238
+ max_cache_size_gb: float = 5.0
239
+
240
+ # Default processing settings
241
+ default_image_quality: int = 85
242
+ max_image_dimension: int = 4096
243
+
244
+ # Performance
245
+ max_workers: int = 4
246
+ use_gpu: bool = True
247
+ memory_limit_mb: int = 2048
248
+
249
+ # Apply the config globally
250
+ VisionConfig.set_global(MyVisionConfig())
251
+ ```
252
+
253
+ ## Performance Optimization
254
+
255
+ ### GPU Acceleration
256
+
257
+ ```python
258
+ from pyw.vision import accelerate
259
+
260
+ # Auto-detect and configure the GPU
261
+ accelerate.setup_gpu(memory_fraction=0.8)
262
+
263
+ # Check availability
264
+ if accelerate.gpu_available():
265
+ print(f"GPU: {accelerate.gpu_info()}")
266
+
267
+ # Use the GPU for batch processing
268
+ detector = ObjectDetector.yolo_v8(device="cuda")
269
+ ```
270
+
271
+ ### Memory Management
272
+
273
+ ```python
274
+ from pyw.vision import memory
275
+
276
+ # Context manager for memory management
277
+ with memory.limit_usage(max_mb=1024):
278
+ # Process large images
279
+ large_img = Image.from_file("huge_image.tiff")
280
+ processed = large_img.resize(width=2000)
281
+
282
+ # Automatic cleanup of the model cache
283
+ memory.cleanup_model_cache(max_age_days=7)
284
+ ```
285
+
286
+ ### Profiling
287
+
288
+ ```python
289
+ from pyw.vision import profiler
290
+
291
+ # Profile detection performance
292
+ with profiler.measure("yolo_detection") as p:
293
+ results = detector.detect_batch(image_list)
294
+
295
+ print(f"Detection took {p.elapsed:.2f}s")
296
+ print(f"Images/sec: {len(image_list) / p.elapsed:.1f}")
297
+ ```
298
+
299
+ ## Quality Assurance
300
+
301
+ ### Image Quality Metrics
302
+
303
+ ```python
304
+ from pyw.vision import quality
305
+
306
+ # Compute quality metrics
307
+ metrics = quality.analyze("image.jpg")
308
+ print(f"Sharpness: {metrics.sharpness:.2f}")
309
+ print(f"Brightness: {metrics.brightness:.2f}")
310
+ print(f"Contrast: {metrics.contrast:.2f}")
311
+ print(f"Noise level: {metrics.noise_level:.2f}")
312
+
313
+ # Compare two images
314
+ similarity = quality.compare("original.jpg", "processed.jpg")
315
+ print(f"SSIM: {similarity.ssim:.3f}")
316
+ print(f"PSNR: {similarity.psnr:.1f} dB")
317
+ ```
318
+
319
+ ### Validation Pipeline
320
+
321
+ ```python
322
+ from pyw.vision import validation
323
+
324
+ # Validate a batch of images
325
+ validator = validation.ImageValidator(
326
+ min_resolution=(640, 480),
327
+ max_file_size_mb=10,
328
+ allowed_formats=["jpg", "png", "webp"]
329
+ )
330
+
331
+ results = validator.validate_batch("input_dir/")
332
+ valid_images = [r.path for r in results if r.is_valid]
333
+ ```
334
+
335
+ ## Testing Support
336
+
337
+ ```python
338
+ from pyw.vision.testing import (
339
+ generate_test_image, assert_image_equal,
340
+ mock_detector, benchmark_pipeline
341
+ )
342
+
343
+ def test_image_processing():
344
+ # Generate a test image
345
+ test_img = generate_test_image(
346
+ width=800, height=600,
347
+ pattern="checkerboard",
348
+ noise_level=0.1
349
+ )
350
+
351
+ # Process the image (assumes `processor` and `expected_result` test fixtures are available)
352
+ result = processor.enhance(test_img)
353
+
354
+ # Assertions
355
+ assert_image_equal(result, expected_result, tolerance=0.05)
356
+ assert result.width == 800
357
+ assert result.height == 600
358
+
359
+ # Mock detector for testing
360
+ with mock_detector(fake_detections=[
361
+ {"class": "person", "confidence": 0.9, "bbox": [10, 10, 100, 200]}
362
+ ]) as detector:
363
+ results = detector.detect("test.jpg")
364
+ assert len(results) == 1
365
+ ```
366
+
367
+ ## CLI Tools
368
+
369
+ ```bash
370
+ # Resize a batch of images
371
+ pyw-vision resize input/*.jpg --width=800 --output=resized/
372
+
373
+ # Object detection with preview
374
+ pyw-vision detect image.jpg --model=yolov8m --show-preview
375
+
376
+ # Extract frames from a video
377
+ pyw-vision extract-frames video.mp4 --fps=1 --output=frames/
378
+
379
+ # Generate a quality report
380
+ pyw-vision quality-report images/ --format=html --output=report.html
381
+
382
+ # Benchmark performance
383
+ pyw-vision benchmark --model=yolov8s --images=test_set/ --iterations=10
384
+ ```
385
+
386
+ ## Examples
387
+
388
+ ### Automated Photo Enhancement
389
+
390
+ ```python
391
+ from pyw.vision import PhotoEnhancer
392
+
393
+ # Set up the AI-based enhancer
394
+ enhancer = PhotoEnhancer(
395
+ auto_exposure=True,
396
+ noise_reduction=True,
397
+ color_enhancement=True,
398
+ face_aware=True # Optimize for portraits
399
+ )
400
+
401
+ # Enhance a single photo
402
+ enhanced = enhancer.enhance("photo.jpg")
403
+ enhanced.save("enhanced.jpg")
404
+
405
+ # Batch with per-type settings (photo_collections maps a photo type to its photos)
406
+ settings = {
407
+ "portrait": {"face_aware": True, "skin_smoothing": 0.3},
408
+ "landscape": {"saturation": 1.2, "clarity": 1.1},
409
+ "night": {"denoise": "aggressive", "highlight_recovery": True}
410
+ }
411
+
412
+ for photo_type, photos in photo_collections.items():
413
+ enhancer.update_settings(settings[photo_type])
414
+ for photo in photos:
415
+ enhanced = enhancer.enhance(photo)
416
+ enhanced.save(f"enhanced/{photo_type}/{photo.name}")
417
+ ```
418
+
419
+ ### Security Camera Analysis
420
+
421
+ ```python
422
+ from pyw.vision import SecurityAnalyzer
423
+ from pyw.logger import get_logger
+ from datetime import datetime
424
+
425
+ logger = get_logger("security")
426
+
427
+ # Setup analyzer
428
+ analyzer = SecurityAnalyzer(
429
+ person_detection=True,
430
+ vehicle_detection=True,
431
+ intrusion_zones=["front_door", "parking"],
432
+ alert_confidence=0.7
433
+ )
434
+
435
+ # Analyze a camera frame (capture_camera_frame() is a placeholder for your capture code)
436
+ frame = capture_camera_frame()
437
+ events = analyzer.analyze(frame, timestamp=datetime.now())
438
+
439
+ for event in events:
440
+ if event.type == "person_detected":
441
+ logger.warning(f"Person detected in {event.zone}")
442
+ # Send an alert
443
+
444
+ elif event.type == "vehicle_detected":
445
+ logger.info(f"Vehicle detected: {event.details}")
446
+ ```
447
+
448
+ ## Roadmap
449
+
450
+ - 🤖 **AI Models**: Integration with Hugging Face models, ONNX Runtime
451
+ - 🎥 **Video Processing**: Advanced video analysis, object tracking
452
+ - 📱 **Mobile Optimization**: Lightweight models for mobile deployment
453
+ - ☁️ **Cloud Integration**: Processing on AWS Rekognition, Google Vision API
454
+ - 🔧 **Custom Training**: Tools for training custom models
455
+ - 📊 **Analytics**: Advanced dashboards and reporting
456
+ - 🚀 **Edge Computing**: Optimizations for Raspberry Pi and edge devices
457
+
458
+ ## Architecture
459
+
460
+ ```
461
+ pyw-vision/
462
+ ├── pyw/
463
+ │ └── vision/
464
+ │ ├── __init__.py # Public API
465
+ │ ├── core/
466
+ │ │ ├── image.py # Image class and core processing
467
+ │ │ ├── detector.py # Object detection
468
+ │ │ ├── features.py # Feature extraction
469
+ │ │ └── pipeline.py # Processing pipelines
470
+ │ ├── models/
471
+ │ │ ├── yolo.py # YOLO integration
472
+ │ │ ├── opencv.py # OpenCV models
473
+ │ │ └── custom.py # Custom model loading
474
+ │ ├── filters/
475
+ │ │ ├── enhance.py # Enhancement filters
476
+ │ │ ├── artistic.py # Artistic effects
477
+ │ │ └── repair.py # Image repair
478
+ │ ├── utils/
479
+ │ │ ├── metrics.py # Quality metrics
480
+ │ │ ├── geometry.py # Geometric operations
481
+ │ │ └── color.py # Color space operations
482
+ │ └── cli/ # Command line tools
483
+ └── tests/ # Full test suite
484
+ ```
485
+
486
+ ## Contributing
487
+
488
+ 1. **Fork & Clone**: `git clone https://github.com/pythonWoods/pyw-vision.git`
489
+ 2. **Development setup**: `poetry install --with dev && poetry shell`
490
+ 3. **Install test dependencies**: `poetry install --extras "ml"`
491
+ 4. **Quality checks**: `ruff check . && mypy && pytest --cov`
492
+ 5. **Test with real images**: Use the dataset in `tests/fixtures/`
493
+ 6. **Documentation**: Update the examples for new features
494
+ 7. **Performance**: Benchmark changes with `pytest --benchmark-only`
495
+ 8. **Pull Request**: Include examples and test coverage
496
+
497
+ Explore the world of computer vision with **pythonWoods**! 🌲👁️
498
+
499
+ ## Useful links
500
+
501
+ Dev documentation (work in progress) → https://pythonwoods.dev/docs/pyw-vision/latest/
502
+
503
+ Issue tracker → https://github.com/pythonWoods/pyw-vision/issues
504
+
505
+ Changelog → https://github.com/pythonWoods/pyw-vision/releases
506
+
507
+ © pythonWoods — MIT License
@@ -0,0 +1,487 @@
1
+ # pyw-vision 👁️
2
+ [![PyPI](https://img.shields.io/pypi/v/pyw-vision.svg)](https://pypi.org/project/pyw-vision/)
3
+ [![CI](https://github.com/pythonWoods/pyw-vision/actions/workflows/ci.yml/badge.svg)](https://github.com/pythonWoods/pyw-vision/actions/workflows/ci.yml)
4
+ [![License](https://img.shields.io/badge/License-MIT-yellow.svg)](LICENSE)
5
+
6
+ > Computer vision utilities & helpers for the **pythonWoods** ecosystem.
7
+
8
+ ## Overview
9
+
10
+ **pyw-vision** provides computer vision utilities and helpers with a focus on ease of use and performance. As part of the pythonWoods ecosystem, it integrates seamlessly with the other modules to build complete image-processing and visual-analysis solutions.
11
+
12
+ ## Features
13
+
14
+ ### 🖼️ Image Processing
15
+ - **Resize & Transform**: Smart resizing with aspect-ratio preservation
16
+ - **Filters & Effects**: Blur, sharpen, brightness, contrast
17
+ - **Format Conversion**: Multi-format support (JPEG, PNG, WebP, TIFF)
18
+ - **Batch Processing**: Efficient processing of large batches of images
19
+
20
+ ### 🎯 Object Detection
21
+ - **YOLO Integration**: YOLOv5/v8 support with model caching
22
+ - **Custom Models**: Load your own custom models
23
+ - **Bounding Boxes**: Utilities for bbox handling and rendering
24
+ - **Confidence Filtering**: Automatic filtering on detection quality
25
+
26
+ ### 🔍 Feature Extraction
27
+ - **Keypoint Detection**: SIFT, ORB, Harris corners
28
+ - **Descriptors**: Feature matching and similarity
29
+ - **Template Matching**: Pattern search in images
30
+ - **Contour Analysis**: Shape detection and analysis
31
+
32
+ ### 📊 Analysis Tools
33
+ - **Image Metrics**: Histograms, statistics, quality metrics
34
+ - **Comparison**: SSIM, MSE, perceptual difference
35
+ - **Color Analysis**: Palette extraction, dominant colors (see the sketch below)
36
+ - **Geometric**: Perspective correction, distortion removal
37
+
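+ A minimal sketch of how palette extraction could look; the `color` module and the `extract_palette` helper are illustrative assumptions, not a published API:
+
+ ```python
+ # Hypothetical palette extraction (module and function names are assumptions)
+ from pyw.vision import color
+
+ palette = color.extract_palette("photo.jpg", n_colors=5)
+ for swatch in palette:
+     print(swatch.hex, swatch.ratio)  # dominant colors and their coverage
+ ```
+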
38
+ ## Installation
39
+
40
+ ```bash
41
+ # Base installation
42
+ pip install pyw-vision
43
+
44
+ # With deep-learning support (YOLOv8, PyTorch)
45
+ pip install pyw-vision[ml]
46
+
47
+ # Full computer vision stack (includes motion detection)
48
+ pip install pyw-cv # Bundle: pyw-vision + pyw-motion
49
+ ```
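+
+ After installing the `ml` extra, a quick sanity check for the optional deep-learning stack (assuming the extra pulls in PyTorch, as noted above) might look like:
+
+ ```python
+ import importlib.util
+
+ # True if the optional deep-learning dependency (PyTorch) is importable
+ HAS_ML = importlib.util.find_spec("torch") is not None
+ print("ml extra available:", HAS_ML)
+ ```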
50
+
51
+ ## Quick Start
52
+
53
+ ### Basic Image Processing
54
+
55
+ ```python
56
+ from pyw.vision import Image, ImageProcessor
57
+
58
+ # Load and process an image
59
+ img = Image.from_file("photo.jpg")
60
+
61
+ # Chain processing
62
+ processed = (img
63
+ .resize(width=800, keep_aspect=True)
64
+ .enhance_contrast(1.2)
65
+ .apply_blur(radius=2)
66
+ .convert_format("webp")
67
+ )
68
+
69
+ processed.save("output.webp", quality=85)
70
+ ```
71
+
72
+ ### Object Detection
73
+
74
+ ```python
75
+ from pyw.vision import ObjectDetector
76
+
77
+ # Set up a detector with automatic model caching
78
+ detector = ObjectDetector.yolo_v8(model_size="medium")
79
+
80
+ # Run detection on a single image
81
+ results = detector.detect("image.jpg", confidence=0.5)
82
+
83
+ for detection in results:
84
+ print(f"Object: {detection.class_name} ({detection.confidence:.2f})")
85
+ print(f"Bbox: {detection.bbox}")
86
+
87
+ # Batch detection
88
+ batch_results = detector.detect_batch([
89
+ "img1.jpg", "img2.jpg", "img3.jpg"
90
+ ], max_workers=4)
91
+ ```
92
+
93
+ ### Feature Matching
94
+
95
+ ```python
96
+ from pyw.vision import FeatureExtractor, FeatureMatcher
97
+
98
+ # Extract features from two images
99
+ extractor = FeatureExtractor.sift(n_features=1000)
100
+ features1 = extractor.extract("template.jpg")
101
+ features2 = extractor.extract("scene.jpg")
102
+
103
+ # Match features
104
+ matcher = FeatureMatcher(algorithm="flann")
105
+ matches = matcher.match(features1, features2)
106
+
107
+ # Find the homography
108
+ homography = matcher.find_homography(matches, min_matches=10)
109
+ if homography is not None:
110
+ print("Template found in scene!")
111
+ ```
112
+
113
+ ## Advanced Usage
114
+
115
+ ### Custom Image Pipeline
116
+
117
+ ```python
118
+ from pyw.vision import ImagePipeline, filters
119
+
120
+ # Define a custom pipeline
121
+ pipeline = ImagePipeline([
122
+ filters.NormalizeLighting(),
123
+ filters.RemoveNoise(method="bilateral"),
124
+ filters.EnhanceSharpness(factor=1.5),
125
+ filters.ColorBalance(auto=True)
126
+ ])
127
+
128
+ # Apply to a single image
129
+ result = pipeline.process("noisy_image.jpg")
130
+
131
+ # Batch processing with a progress bar
132
+ results = pipeline.process_batch(
133
+ ["img1.jpg", "img2.jpg", "img3.jpg"],
134
+ output_dir="processed/",
135
+ show_progress=True
136
+ )
137
+ ```
138
+
139
+ ### Smart Cropping
140
+
141
+ ```python
142
+ from pyw.vision import SmartCropper
143
+
144
+ cropper = SmartCropper(
145
+ target_ratio=(16, 9),
146
+ focus_detection=True # Use face/object detection
147
+ )
148
+
149
+ # Smart crop that keeps the important subjects
150
+ cropped = cropper.crop("portrait.jpg")
151
+ cropped.save("cropped_16x9.jpg")
152
+
153
+ # Multiple crops for social media
154
+ variants = cropper.crop_variants("image.jpg", formats=[
155
+ ("instagram_post", 1080, 1080),
156
+ ("instagram_story", 1080, 1920),
157
+ ("facebook_cover", 1200, 630)
158
+ ])
159
+ ```
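+
+ Assuming `crop_variants` returns a mapping from format name to cropped image (the exact return type is not documented here), the variants could be saved like this:
+
+ ```python
+ # Hypothetical follow-up: persist each named variant produced above
+ for name, image in variants.items():
+     image.save(f"social/{name}.jpg", quality=90)
+ ```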
160
+
161
+ ### Real-time Processing
162
+
163
+ ```python
164
+ from pyw.vision import VideoProcessor, ObjectDetector
165
+
+ # Detector reused inside the frame handler below
+ detector = ObjectDetector.yolo_v8(model_size="small")
166
+
167
+ # Setup video processor
168
+ processor = VideoProcessor(
169
+ input_source=0, # Webcam
170
+ fps_limit=30
171
+ )
172
+
173
+ @processor.frame_handler
174
+ def process_frame(frame):
175
+ # Run detection in real time
176
+ detections = detector.detect(frame, confidence=0.6)
177
+
178
+ # Draw bounding boxes
179
+ for det in detections:
180
+ frame = det.draw_on(frame, color="red", thickness=2)
181
+
182
+ return frame
183
+
184
+ # Start processing
185
+ processor.start()
186
+ ```
187
+
188
+ ### Integration with pyw-fs
189
+
190
+ ```python
191
+ from pyw.vision import Image
192
+ from pyw.fs import FileSystem
193
+
194
+ # Use the unified filesystem (local/S3/GCS)
195
+ fs = FileSystem.from_url("s3://my-bucket/images/")
196
+
197
+ # Process remote images
198
+ for image_path in fs.glob("*.jpg"):
199
+ img = Image.from_fs(fs, image_path)
200
+
201
+ # Generate a thumbnail
202
+ thumb = img.resize(width=200, keep_aspect=True)
203
+
204
+ # Save the thumbnail
205
+ thumb_path = image_path.replace(".jpg", "_thumb.jpg")
206
+ thumb.save_to_fs(fs, thumb_path)
207
+ ```
208
+
209
+ ## Configuration
210
+
211
+ ```python
212
+ from pyw.vision import VisionConfig
213
+ from pyw.core import BaseConfig
214
+
215
+ class MyVisionConfig(BaseConfig):
216
+ # Model paths and caching
217
+ model_cache_dir: str = "~/.pyw/vision/models"
218
+ max_cache_size_gb: float = 5.0
219
+
220
+ # Default processing settings
221
+ default_image_quality: int = 85
222
+ max_image_dimension: int = 4096
223
+
224
+ # Performance
225
+ max_workers: int = 4
226
+ use_gpu: bool = True
227
+ memory_limit_mb: int = 2048
228
+
229
+ # Apply the config globally
230
+ VisionConfig.set_global(MyVisionConfig())
231
+ ```
232
+
233
+ ## Performance Optimization
234
+
235
+ ### GPU Acceleration
236
+
237
+ ```python
238
+ from pyw.vision import accelerate
239
+
240
+ # Auto-detect and configure the GPU
241
+ accelerate.setup_gpu(memory_fraction=0.8)
242
+
243
+ # Check availability
244
+ if accelerate.gpu_available():
245
+ print(f"GPU: {accelerate.gpu_info()}")
246
+
247
+ # Use the GPU for batch processing
248
+ detector = ObjectDetector.yolo_v8(device="cuda")
249
+ ```
250
+
251
+ ### Memory Management
252
+
253
+ ```python
254
+ from pyw.vision import memory
255
+
256
+ # Context manager for memory management
257
+ with memory.limit_usage(max_mb=1024):
258
+ # Process large images
259
+ large_img = Image.from_file("huge_image.tiff")
260
+ processed = large_img.resize(width=2000)
261
+
262
+ # Automatic cleanup of the model cache
263
+ memory.cleanup_model_cache(max_age_days=7)
264
+ ```
265
+
266
+ ### Profiling
267
+
268
+ ```python
269
+ from pyw.vision import profiler
270
+
271
+ # Profile detection performance
272
+ with profiler.measure("yolo_detection") as p:
273
+ results = detector.detect_batch(image_list)
274
+
275
+ print(f"Detection took {p.elapsed:.2f}s")
276
+ print(f"Images/sec: {len(image_list) / p.elapsed:.1f}")
277
+ ```
278
+
279
+ ## Quality Assurance
280
+
281
+ ### Image Quality Metrics
282
+
283
+ ```python
284
+ from pyw.vision import quality
285
+
286
+ # Compute quality metrics
287
+ metrics = quality.analyze("image.jpg")
288
+ print(f"Sharpness: {metrics.sharpness:.2f}")
289
+ print(f"Brightness: {metrics.brightness:.2f}")
290
+ print(f"Contrast: {metrics.contrast:.2f}")
291
+ print(f"Noise level: {metrics.noise_level:.2f}")
292
+
293
+ # Compare two images
294
+ similarity = quality.compare("original.jpg", "processed.jpg")
295
+ print(f"SSIM: {similarity.ssim:.3f}")
296
+ print(f"PSNR: {similarity.psnr:.1f} dB")
297
+ ```
298
+
299
+ ### Validation Pipeline
300
+
301
+ ```python
302
+ from pyw.vision import validation
303
+
304
+ # Validate a batch of images
305
+ validator = validation.ImageValidator(
306
+ min_resolution=(640, 480),
307
+ max_file_size_mb=10,
308
+ allowed_formats=["jpg", "png", "webp"]
309
+ )
310
+
311
+ results = validator.validate_batch("input_dir/")
312
+ valid_images = [r.path for r in results if r.is_valid]
313
+ ```
314
+
315
+ ## Testing Support
316
+
317
+ ```python
318
+ from pyw.vision.testing import (
319
+ generate_test_image, assert_image_equal,
320
+ mock_detector, benchmark_pipeline
321
+ )
322
+
323
+ def test_image_processing():
324
+ # Generate a test image
325
+ test_img = generate_test_image(
326
+ width=800, height=600,
327
+ pattern="checkerboard",
328
+ noise_level=0.1
329
+ )
330
+
331
+ # Process the image (assumes `processor` and `expected_result` test fixtures are available)
332
+ result = processor.enhance(test_img)
333
+
334
+ # Assertions
335
+ assert_image_equal(result, expected_result, tolerance=0.05)
336
+ assert result.width == 800
337
+ assert result.height == 600
338
+
339
+ # Mock detector for testing
340
+ with mock_detector(fake_detections=[
341
+ {"class": "person", "confidence": 0.9, "bbox": [10, 10, 100, 200]}
342
+ ]) as detector:
343
+ results = detector.detect("test.jpg")
344
+ assert len(results) == 1
345
+ ```
346
+
347
+ ## CLI Tools
348
+
349
+ ```bash
350
+ # Resize a batch of images
351
+ pyw-vision resize input/*.jpg --width=800 --output=resized/
352
+
353
+ # Object detection with preview
354
+ pyw-vision detect image.jpg --model=yolov8m --show-preview
355
+
356
+ # Extract frames from a video
357
+ pyw-vision extract-frames video.mp4 --fps=1 --output=frames/
358
+
359
+ # Generate a quality report
360
+ pyw-vision quality-report images/ --format=html --output=report.html
361
+
362
+ # Benchmark performance
363
+ pyw-vision benchmark --model=yolov8s --images=test_set/ --iterations=10
364
+ ```
365
+
366
+ ## Examples
367
+
368
+ ### Automated Photo Enhancement
369
+
370
+ ```python
371
+ from pyw.vision import PhotoEnhancer
372
+
373
+ # Set up the AI-based enhancer
374
+ enhancer = PhotoEnhancer(
375
+ auto_exposure=True,
376
+ noise_reduction=True,
377
+ color_enhancement=True,
378
+ face_aware=True # Optimize for portraits
379
+ )
380
+
381
+ # Enhance a single photo
382
+ enhanced = enhancer.enhance("photo.jpg")
383
+ enhanced.save("enhanced.jpg")
384
+
385
+ # Batch with per-type settings (photo_collections maps a photo type to its photos)
386
+ settings = {
387
+ "portrait": {"face_aware": True, "skin_smoothing": 0.3},
388
+ "landscape": {"saturation": 1.2, "clarity": 1.1},
389
+ "night": {"denoise": "aggressive", "highlight_recovery": True}
390
+ }
391
+
392
+ for photo_type, photos in photo_collections.items():
393
+ enhancer.update_settings(settings[photo_type])
394
+ for photo in photos:
395
+ enhanced = enhancer.enhance(photo)
396
+ enhanced.save(f"enhanced/{photo_type}/{photo.name}")
397
+ ```
398
+
399
+ ### Security Camera Analysis
400
+
401
+ ```python
402
+ from pyw.vision import SecurityAnalyzer
403
+ from pyw.logger import get_logger
+ from datetime import datetime
404
+
405
+ logger = get_logger("security")
406
+
407
+ # Setup analyzer
408
+ analyzer = SecurityAnalyzer(
409
+ person_detection=True,
410
+ vehicle_detection=True,
411
+ intrusion_zones=["front_door", "parking"],
412
+ alert_confidence=0.7
413
+ )
414
+
415
+ # Analyze a camera frame (capture_camera_frame() is a placeholder for your capture code)
416
+ frame = capture_camera_frame()
417
+ events = analyzer.analyze(frame, timestamp=datetime.now())
418
+
419
+ for event in events:
420
+ if event.type == "person_detected":
421
+ logger.warning(f"Person detected in {event.zone}")
422
+ # Send an alert
423
+
424
+ elif event.type == "vehicle_detected":
425
+ logger.info(f"Vehicle detected: {event.details}")
426
+ ```
427
+
428
+ ## Roadmap
429
+
430
+ - 🤖 **AI Models**: Integration with Hugging Face models, ONNX Runtime
431
+ - 🎥 **Video Processing**: Advanced video analysis, object tracking
432
+ - 📱 **Mobile Optimization**: Lightweight models for mobile deployment
433
+ - ☁️ **Cloud Integration**: Processing on AWS Rekognition, Google Vision API
434
+ - 🔧 **Custom Training**: Tools for training custom models
435
+ - 📊 **Analytics**: Advanced dashboards and reporting
436
+ - 🚀 **Edge Computing**: Optimizations for Raspberry Pi and edge devices
437
+
438
+ ## Architecture
439
+
440
+ ```
441
+ pyw-vision/
442
+ ├── pyw/
443
+ │ └── vision/
444
+ │ ├── __init__.py # Public API
445
+ │ ├── core/
446
+ │ │ ├── image.py # Image class and core processing
447
+ │ │ ├── detector.py # Object detection
448
+ │ │ ├── features.py # Feature extraction
449
+ │ │ └── pipeline.py # Processing pipelines
450
+ │ ├── models/
451
+ │ │ ├── yolo.py # YOLO integration
452
+ │ │ ├── opencv.py # OpenCV models
453
+ │ │ └── custom.py # Custom model loading
454
+ │ ├── filters/
455
+ │ │ ├── enhance.py # Enhancement filters
456
+ │ │ ├── artistic.py # Artistic effects
457
+ │ │ └── repair.py # Image repair
458
+ │ ├── utils/
459
+ │ │ ├── metrics.py # Quality metrics
460
+ │ │ ├── geometry.py # Geometric operations
461
+ │ │ └── color.py # Color space operations
462
+ │ └── cli/ # Command line tools
463
+ └── tests/ # Full test suite
464
+ ```
465
+
466
+ ## Contributing
467
+
468
+ 1. **Fork & Clone**: `git clone https://github.com/pythonWoods/pyw-vision.git`
469
+ 2. **Development setup**: `poetry install --with dev && poetry shell`
470
+ 3. **Install test dependencies**: `poetry install --extras "ml"`
471
+ 4. **Quality checks**: `ruff check . && mypy && pytest --cov`
472
+ 5. **Test with real images**: Use the dataset in `tests/fixtures/`
473
+ 6. **Documentation**: Update the examples for new features
474
+ 7. **Performance**: Benchmark changes with `pytest --benchmark-only`
475
+ 8. **Pull Request**: Include examples and test coverage
476
+
477
+ Explore the world of computer vision with **pythonWoods**! 🌲👁️
478
+
479
+ ## Useful links
480
+
481
+ Dev documentation (work in progress) → https://pythonwoods.dev/docs/pyw-vision/latest/
482
+
483
+ Issue tracker → https://github.com/pythonWoods/pyw-vision/issues
484
+
485
+ Changelog → https://github.com/pythonWoods/pyw-vision/releases
486
+
487
+ © pythonWoods — MIT License
@@ -0,0 +1,32 @@
1
+ [build-system]
2
+ requires = ["hatchling>=1.18"]
3
+ build-backend = "hatchling.build"
4
+
5
+ [project]
6
+ name = "pyw-vision"
7
+ version = "0.0.0.post2"
8
+ description = "Reserved placeholder for pyw-vision (vision utilities)"
9
+ authors = [{name = "pythonWoods"}]
10
+ license = {text = "MIT"}
11
+ requires-python = ">=3.9"
12
+ readme = "README.md"
13
+ dependencies = [
14
+ "pyw-core>=0.0.0"
15
+ ]
16
+
17
+ classifiers = [
18
+ "Development Status :: 2 - Pre-Alpha",
19
+ "License :: OSI Approved :: MIT License",
20
+ "Programming Language :: Python :: 3 :: Only",
21
+ "Programming Language :: Python :: 3.11",
22
+ "Typing :: Typed",
23
+ ]
24
+
25
+ [project.urls]
26
+ Homepage = "https://github.com/pythonWoods/pyw-vision"
27
+ Documentation = "https://pythonwoods.dev/docs/pyw-vision/latest/"
28
+ Issues = "https://github.com/pythonWoods/pyw-vision/issues"
29
+ Changelog = "https://github.com/pythonWoods/pyw-vision/releases"
30
+
31
+ [tool.hatch.build.targets.wheel]
32
+ packages = ["src/pyw"]
pyw_vision-0.0.0/PKG-INFO DELETED
@@ -1,15 +0,0 @@
1
- Metadata-Version: 2.4
2
- Name: pyw-vision
3
- Version: 0.0.0
4
- Summary: Reserved placeholder for pyw-vision (vision utilities)
5
- Project-URL: Homepage, https://github.com/pythonWoods/pyw-vision
6
- Author: pythonWoods
7
- License: MIT
8
- License-File: LICENSE
9
- Requires-Python: >=3.9
10
- Requires-Dist: pyw-core>=0.0.0
11
- Description-Content-Type: text/markdown
12
-
13
- # pyw-vision
14
-
15
- Reserved placeholder for pyw-vision (vision utilities).
@@ -1,3 +0,0 @@
1
- # pyw-vision
2
-
3
- Reserved placeholder for pyw-vision (vision utilities).
@@ -1,21 +0,0 @@
1
- [build-system]
2
- requires = ["hatchling>=1.18"]
3
- build-backend = "hatchling.build"
4
-
5
- [project]
6
- name = "pyw-vision"
7
- version = "0.0.0"
8
- description = "Reserved placeholder for pyw-vision (vision utilities)"
9
- authors = [{name = "pythonWoods"}]
10
- license = {text = "MIT"}
11
- requires-python = ">=3.9"
12
- readme = "README.md"
13
- dependencies = [
14
- "pyw-core>=0.0.0"
15
- ]
16
-
17
- [project.urls]
18
- Homepage = "https://github.com/pythonWoods/pyw-vision"
19
-
20
- [tool.hatch.build.targets.wheel]
21
- packages = ["src/pyw"]
File without changes