deckbuilder 1.0.0b1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- deckbuilder/__init__.py +22 -0
- deckbuilder/cli.py +544 -0
- deckbuilder/cli_tools.py +739 -0
- deckbuilder/engine.py +1546 -0
- deckbuilder/image_handler.py +291 -0
- deckbuilder/layout_intelligence.json +288 -0
- deckbuilder/layout_intelligence.py +398 -0
- deckbuilder/naming_conventions.py +541 -0
- deckbuilder/placeholder_types.py +101 -0
- deckbuilder/placekitten_integration.py +280 -0
- deckbuilder/structured_frontmatter.py +862 -0
- deckbuilder/table_styles.py +37 -0
- deckbuilder-1.0.0b1.dist-info/METADATA +378 -0
- deckbuilder-1.0.0b1.dist-info/RECORD +37 -0
- deckbuilder-1.0.0b1.dist-info/WHEEL +5 -0
- deckbuilder-1.0.0b1.dist-info/entry_points.txt +3 -0
- deckbuilder-1.0.0b1.dist-info/licenses/LICENSE +201 -0
- deckbuilder-1.0.0b1.dist-info/top_level.txt +4 -0
- mcp_server/__init__.py +9 -0
- mcp_server/content_analysis.py +436 -0
- mcp_server/content_optimization.py +822 -0
- mcp_server/layout_recommendations.py +595 -0
- mcp_server/main.py +550 -0
- mcp_server/tools.py +492 -0
- placekitten/README.md +561 -0
- placekitten/__init__.py +44 -0
- placekitten/core.py +184 -0
- placekitten/filters.py +183 -0
- placekitten/images/ACuteKitten-1.png +0 -0
- placekitten/images/ACuteKitten-2.png +0 -0
- placekitten/images/ACuteKitten-3.png +0 -0
- placekitten/images/TwoKitttens Playing-1.png +0 -0
- placekitten/images/TwoKitttens Playing-2.png +0 -0
- placekitten/images/TwoKitttensSleeping-1.png +0 -0
- placekitten/processor.py +262 -0
- placekitten/smart_crop.py +314 -0
- shared/__init__.py +9 -0
placekitten/smart_crop.py
@@ -0,0 +1,314 @@
"""
Smart Crop - Intelligent cropping engine with computer vision.

This module implements the intelligent cropping pipeline using OpenCV
for edge detection, contour analysis, and rule-of-thirds composition.
"""

from pathlib import Path
from typing import Dict, List, Optional, Tuple

import cv2
import numpy as np
from PIL import Image


class SmartCropEngine:
    """
    Intelligent cropping engine using computer vision techniques.

    Implements a multi-step pipeline:
    1. Original analysis and preprocessing
    2. Edge detection with Canny algorithm
    3. Contour identification and analysis
    4. Rule-of-thirds composition calculation
    5. Optimal crop area determination
    """

    def __init__(self):
        """Initialize the smart crop engine."""
        self.debug_steps = []
        self.crop_info = {}

    def smart_crop(
        self,
        image: Image.Image,
        target_width: int,
        target_height: int,
        save_steps: bool = False,
        output_prefix: str = "smart_crop",
        output_folder: Optional[str] = None,
        strategy: str = "haar-face",
    ) -> Tuple[Image.Image, Dict]:
        """
        Perform intelligent cropping with computer vision.

        Args:
            image: PIL Image to crop
            target_width: Target width in pixels
            target_height: Target height in pixels
            save_steps: Save intermediate processing steps
            output_prefix: Prefix for step visualization files
            output_folder: Directory to save step files (optional)
            strategy: Subject detection strategy; "haar-face" tries face
                detection first and falls back to contours, any other
                value uses contour detection only

        Returns:
            Tuple of (cropped_image, crop_info)
        """
        # Clear previous debug info
        self.debug_steps = []
        self.crop_info = {}

        # Convert PIL to OpenCV format
        cv_image = self._pil_to_cv2(image)
        original_height, original_width = cv_image.shape[:2]

        # Step 1: Original analysis
        step1_image = cv_image.copy()
        self._add_debug_step("1-original", step1_image, save_steps, output_prefix, output_folder)

        # Step 2: Grayscale conversion
        gray = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
        step2_image = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
        self._add_debug_step("2-grayscale", step2_image, save_steps, output_prefix, output_folder)

        # Step 3: Noise reduction with Gaussian blur
        blurred = cv2.GaussianBlur(gray, (5, 5), 0)
        step3_image = cv2.cvtColor(blurred, cv2.COLOR_GRAY2BGR)
        self._add_debug_step("3-blurred", step3_image, save_steps, output_prefix, output_folder)

        # Step 4: Edge detection with Canny
        edges = cv2.Canny(blurred, 50, 150)
        # Highlight edges in red for visualization
        step4_vis = cv_image.copy()
        step4_vis[edges > 0] = [0, 0, 255]  # Red edges
        self._add_debug_step("4-edges", step4_vis, save_steps, output_prefix, output_folder)

        # Strategy-based subject detection
        subject_bbox = None
        largest_area = 0

        if strategy == "haar-face":
            face_cascade = cv2.CascadeClassifier(
                cv2.data.haarcascades + "haarcascade_frontalface_default.xml"
            )
            faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)

            if len(faces) > 0:
                subject_bbox = max(faces, key=lambda r: r[2] * r[3])  # Select largest face
                x, y, w, h = subject_bbox
                step5_image = cv_image.copy()
                cv2.rectangle(step5_image, (x, y), (x + w, y + h), (0, 255, 0), 3)
                largest_area = w * h
            else:
                step5_image = cv_image.copy()

            # Fallback to contour-based detection if face detection fails
            if subject_bbox is None:
                contours, _ = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
                largest_contour = None
                for contour in contours:
                    area = cv2.contourArea(contour)
                    if area > largest_area:
                        largest_area = area
                        largest_contour = contour

                if largest_contour is not None:
                    x, y, w, h = cv2.boundingRect(largest_contour)
                    subject_bbox = (x, y, w, h)
                    cv2.drawContours(step5_image, [largest_contour], -1, (0, 255, 0), 3)

        else:
            # Default to contour-based detection
            contours, _ = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            largest_contour = None
            for contour in contours:
                area = cv2.contourArea(contour)
                if area > largest_area:
                    largest_area = area
                    largest_contour = contour

            step5_image = cv_image.copy()
            if largest_contour is not None:
                cv2.drawContours(step5_image, [largest_contour], -1, (0, 255, 0), 3)
                x, y, w, h = cv2.boundingRect(largest_contour)
                subject_bbox = (x, y, w, h)
        self._add_debug_step(
            "5-largest-contour", step5_image, save_steps, output_prefix, output_folder
        )

        # Step 6: Calculate bounding box of subject (visualization only)
        if subject_bbox is not None:
            x, y, w, h = subject_bbox
            step6_image = cv_image.copy()
            cv2.rectangle(step6_image, (x, y), (x + w, y + h), (255, 0, 0), 2)
            # Fill bounding box with semi-transparent blue
            overlay = step6_image.copy()
            cv2.rectangle(overlay, (x, y), (x + w, y + h), (255, 0, 0), -1)
            step6_image = cv2.addWeighted(step6_image, 0.8, overlay, 0.2, 0)
        else:
            step6_image = cv_image.copy()
        self._add_debug_step(
            "6-bounding-box", step6_image, save_steps, output_prefix, output_folder
        )

        # Step 7: Rule of thirds grid and composition
        crop_box = self._calculate_optimal_crop(
            original_width, original_height, target_width, target_height, subject_bbox
        )

        # Visualize rule of thirds and crop area
        step7_image = cv_image.copy()
        self._draw_rule_of_thirds(step7_image, original_width, original_height)
        self._add_debug_step(
            "7-rule-of-thirds", step7_image, save_steps, output_prefix, output_folder
        )

        # Step 8: Final crop area visualization
        step8_image = cv_image.copy()
        x1, y1, x2, y2 = crop_box
        cv2.rectangle(step8_image, (x1, y1), (x2, y2), (255, 0, 255), 3)  # Magenta border
        self._add_debug_step("8-crop-area", step8_image, save_steps, output_prefix, output_folder)

        # Step 9: Perform the actual crop
        cropped_cv = cv_image[y1:y2, x1:x2]
        cropped_resized = cv2.resize(
            cropped_cv, (target_width, target_height), interpolation=cv2.INTER_LANCZOS4
        )

        # Convert back to PIL
        final_image = self._cv2_to_pil(cropped_resized)

        # Save final result
        self._add_debug_step("9-final", cropped_resized, save_steps, output_prefix, output_folder)

        # Store crop information
        self.crop_info = {
            "original_size": (original_width, original_height),
            "target_size": (target_width, target_height),
            "crop_box": crop_box,
            "subject_bbox": subject_bbox,
            "contour_area": largest_area,
            "steps_saved": len(self.debug_steps) if save_steps else 0,
        }

        return final_image, self.crop_info

    def _calculate_optimal_crop(
        self,
        orig_width: int,
        orig_height: int,
        target_width: int,
        target_height: int,
        subject_bbox: Optional[Tuple[int, int, int, int]],
    ) -> Tuple[int, int, int, int]:
        """
        Calculate optimal crop box using rule of thirds and subject positioning.

        Args:
            orig_width: Original image width
            orig_height: Original image height
            target_width: Target crop width
            target_height: Target crop height
            subject_bbox: Bounding box of main subject (x, y, w, h)

        Returns:
            Crop box as (x1, y1, x2, y2)
        """
        target_ratio = target_width / target_height

        # Calculate crop dimensions maintaining target aspect ratio
        if orig_width / orig_height > target_ratio:
            # Original is wider - crop width
            crop_height = orig_height
            crop_width = int(crop_height * target_ratio)
        else:
            # Original is taller - crop height
            crop_width = orig_width
            crop_height = int(crop_width / target_ratio)

        # Default to center crop
        crop_x = (orig_width - crop_width) // 2
        crop_y = (orig_height - crop_height) // 2

        # Adjust based on subject position if available
        if subject_bbox is not None:
            subj_x, subj_y, subj_w, subj_h = subject_bbox
            subj_center_x = subj_x + subj_w // 2
            subj_center_y = subj_y + subj_h // 2

            # Try to position subject in lower third (rule of thirds)
            ideal_subj_x = crop_width // 2
            ideal_subj_y = int(crop_height * 2 / 3)  # Lower third

            # Calculate desired crop position
            desired_crop_x = subj_center_x - ideal_subj_x
            desired_crop_y = subj_center_y - ideal_subj_y

            # Ensure crop stays within image bounds
            crop_x = max(0, min(desired_crop_x, orig_width - crop_width))
            crop_y = max(0, min(desired_crop_y, orig_height - crop_height))

        return (crop_x, crop_y, crop_x + crop_width, crop_y + crop_height)

    def _draw_rule_of_thirds(self, image: np.ndarray, width: int, height: int) -> None:
        """Draw rule of thirds grid on image."""
        # Vertical lines
        cv2.line(image, (width // 3, 0), (width // 3, height), (255, 255, 0), 2)
        cv2.line(image, (2 * width // 3, 0), (2 * width // 3, height), (255, 255, 0), 2)

        # Horizontal lines
        cv2.line(image, (0, height // 3), (width, height // 3), (255, 255, 0), 2)
        cv2.line(image, (0, 2 * height // 3), (width, 2 * height // 3), (255, 255, 0), 2)

    def _pil_to_cv2(self, pil_image: Image.Image) -> np.ndarray:
        """Convert PIL Image to OpenCV format."""
        # Convert PIL to RGB if not already
        if pil_image.mode != "RGB":
            pil_image = pil_image.convert("RGB")

        # Convert to numpy array and change from RGB to BGR
        cv_image = np.array(pil_image)
        cv_image = cv2.cvtColor(cv_image, cv2.COLOR_RGB2BGR)
        return cv_image

    def _cv2_to_pil(self, cv_image: np.ndarray) -> Image.Image:
        """Convert OpenCV image to PIL format."""
        # Convert from BGR to RGB
        rgb_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)
        return Image.fromarray(rgb_image)

    def _add_debug_step(
        self,
        step_name: str,
        image: np.ndarray,
        save_steps: bool,
        output_prefix: str,
        output_folder: Optional[str] = None,
    ) -> None:
        """Add debug step and optionally save to file."""
        if save_steps:
            step_info = {"name": step_name, "image": image.copy()}
            self.debug_steps.append(step_info)

            # Save step image with proper folder handling
            filename = f"{output_prefix}_{step_name}.jpg"
            if output_folder:
                # Ensure output folder exists
                Path(output_folder).mkdir(parents=True, exist_ok=True)
                filepath = Path(output_folder) / filename
            else:
                filepath = Path(filename)

            cv2.imwrite(str(filepath), image)

    def get_debug_steps(self) -> List[Dict]:
        """Get list of debug steps with images."""
        return self.debug_steps

    def get_crop_info(self) -> Dict:
        """Get detailed information about the last crop operation."""
        return self.crop_info


# Global instance for easy access
smart_crop_engine = SmartCropEngine()
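
For reference, a minimal usage sketch of the module above, exercising the module-level `smart_crop_engine` instance it exports. The input filename, target size, and output folder below are illustrative placeholders, not part of the package.

from PIL import Image

from placekitten.smart_crop import smart_crop_engine

# Load any image; smart_crop() converts non-RGB modes internally.
img = Image.open("kitten.png")

# Crop to 16:9, saving the nine intermediate visualization steps
# (demo_1-original.jpg ... demo_9-final.jpg) into debug_steps/.
cropped, info = smart_crop_engine.smart_crop(
    img,
    target_width=800,
    target_height=450,
    save_steps=True,
    output_prefix="demo",
    output_folder="debug_steps",
)

# crop_info reports the chosen crop box and detected subject, if any.
print(info["crop_box"], info["subject_bbox"])
cropped.save("kitten_800x450.png")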