@agent-webui/ai-desk-harness-gimp 1.0.29-beta3 → 1.0.32
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +1 -1
- package/python/agent-harness/build/lib/cli_anything/gimp/__init__.py +0 -1
- package/python/agent-harness/build/lib/cli_anything/gimp/__main__.py +0 -3
- package/python/agent-harness/build/lib/cli_anything/gimp/core/__init__.py +0 -1
- package/python/agent-harness/build/lib/cli_anything/gimp/core/canvas.py +0 -193
- package/python/agent-harness/build/lib/cli_anything/gimp/core/export.py +0 -479
- package/python/agent-harness/build/lib/cli_anything/gimp/core/filters.py +0 -382
- package/python/agent-harness/build/lib/cli_anything/gimp/core/layers.py +0 -249
- package/python/agent-harness/build/lib/cli_anything/gimp/core/media.py +0 -174
- package/python/agent-harness/build/lib/cli_anything/gimp/core/project.py +0 -131
- package/python/agent-harness/build/lib/cli_anything/gimp/core/session.py +0 -130
- package/python/agent-harness/build/lib/cli_anything/gimp/gimp_cli.py +0 -788
- package/python/agent-harness/build/lib/cli_anything/gimp/tests/__init__.py +0 -1
- package/python/agent-harness/build/lib/cli_anything/gimp/tests/test_core.py +0 -478
- package/python/agent-harness/build/lib/cli_anything/gimp/tests/test_full_e2e.py +0 -578
- package/python/agent-harness/build/lib/cli_anything/gimp/utils/__init__.py +0 -1
- package/python/agent-harness/build/lib/cli_anything/gimp/utils/gimp_backend.py +0 -208
- package/python/agent-harness/build/lib/cli_anything/gimp/utils/repl_skin.py +0 -498
package/package.json
CHANGED
|
@@ -1 +0,0 @@
|
|
|
1
|
-
"""GIMP CLI - A stateful CLI for image editing."""
|
|
@@ -1 +0,0 @@
|
|
|
1
|
-
"""GIMP CLI - Core modules."""
|
|
@@ -1,193 +0,0 @@
|
|
|
1
|
-
"""GIMP CLI - Canvas operations module."""
|
|
2
|
-
|
|
3
|
-
from typing import Dict, Any
|
|
4
|
-
|
|
5
|
-
|
|
6
|
-
# Color modes accepted by set_mode(); these follow Pillow's mode-name convention.
VALID_MODES = ("RGB", "RGBA", "L", "LA", "CMYK", "P")
# Resampling method names accepted by scale_canvas(); mapped to Pillow
# resampling constants at render time.
RESAMPLE_METHODS = ("nearest", "bilinear", "bicubic", "lanczos")
|
|
8
|
-
|
|
9
|
-
|
|
10
|
-
def resize_canvas(
    project: Dict[str, Any],
    width: int,
    height: int,
    anchor: str = "center",
) -> Dict[str, Any]:
    """Resize the canvas without scaling content (space is added or removed).

    Args:
        project: The project dict
        width: New canvas width
        height: New canvas height
        anchor: Where existing content stays pinned; one of
            "center", "top-left", "top-right", "bottom-left", "bottom-right",
            "top", "bottom", "left", "right"

    Raises:
        ValueError: If dimensions are non-positive or the anchor is unknown.
    """
    if width < 1 or height < 1:
        raise ValueError(f"Canvas dimensions must be positive: {width}x{height}")

    valid_anchors = [
        "center", "top-left", "top-right", "bottom-left", "bottom-right",
        "top", "bottom", "left", "right",
    ]
    if anchor not in valid_anchors:
        raise ValueError(f"Invalid anchor: {anchor}. Valid: {valid_anchors}")

    canvas = project["canvas"]
    prev_w, prev_h = canvas["width"], canvas["height"]

    # Content shift implied by the chosen anchor.
    dx, dy = _anchor_offset(prev_w, prev_h, width, height, anchor)

    canvas["width"], canvas["height"] = width, height

    # Shift every layer so the content stays pinned to the anchor.
    for entry in project.get("layers", []):
        entry["offset_x"] = entry.get("offset_x", 0) + dx
        entry["offset_y"] = entry.get("offset_y", 0) + dy

    return {
        "old_size": f"{prev_w}x{prev_h}",
        "new_size": f"{width}x{height}",
        "anchor": anchor,
        "offset_applied": f"({dx}, {dy})",
    }
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
def scale_canvas(
    project: Dict[str, Any],
    width: int,
    height: int,
    resample: str = "lanczos",
) -> Dict[str, Any]:
    """Scale the canvas and all layers proportionally.

    Layers are only tagged here with scale factors; the actual pixel
    resampling happens at render time.

    Raises:
        ValueError: If dimensions are non-positive or the resample
            method is unknown.
    """
    if width < 1 or height < 1:
        raise ValueError(f"Canvas dimensions must be positive: {width}x{height}")
    if resample not in RESAMPLE_METHODS:
        raise ValueError(f"Invalid resample method: {resample}. Valid: {list(RESAMPLE_METHODS)}")

    canvas = project["canvas"]
    prev_w, prev_h = canvas["width"], canvas["height"]
    factor_x = width / prev_w
    factor_y = height / prev_h

    canvas["width"], canvas["height"] = width, height

    # Tag layers for render-time rescaling; offsets and declared sizes
    # are adjusted immediately so they stay consistent with the canvas.
    for entry in project.get("layers", []):
        entry["_scale_x"] = factor_x
        entry["_scale_y"] = factor_y
        entry["_resample"] = resample
        entry["offset_x"] = round(entry.get("offset_x", 0) * factor_x)
        entry["offset_y"] = round(entry.get("offset_y", 0) * factor_y)
        if "width" in entry:
            entry["width"] = round(entry["width"] * factor_x)
        if "height" in entry:
            entry["height"] = round(entry["height"] * factor_y)

    return {
        "old_size": f"{prev_w}x{prev_h}",
        "new_size": f"{width}x{height}",
        "scale": f"({factor_x:.3f}, {factor_y:.3f})",
        "resample": resample,
    }
|
|
99
|
-
|
|
100
|
-
|
|
101
|
-
def crop_canvas(
    project: Dict[str, Any],
    left: int,
    top: int,
    right: int,
    bottom: int,
) -> Dict[str, Any]:
    """Crop the canvas to a rectangle.

    Raises:
        ValueError: If the region is negative, empty, or exceeds the canvas.
    """
    if left < 0 or top < 0:
        raise ValueError(f"Crop coordinates must be non-negative: left={left}, top={top}")
    if right <= left or bottom <= top:
        raise ValueError(f"Invalid crop region: ({left},{top})-({right},{bottom})")

    canvas = project["canvas"]
    prev_w, prev_h = canvas["width"], canvas["height"]

    if right > prev_w or bottom > prev_h:
        raise ValueError(
            f"Crop region ({left},{top})-({right},{bottom}) exceeds canvas {prev_w}x{prev_h}"
        )

    cropped_w = right - left
    cropped_h = bottom - top
    canvas["width"], canvas["height"] = cropped_w, cropped_h

    # Shift layers so content keeps its position relative to the new origin.
    for entry in project.get("layers", []):
        entry["offset_x"] = entry.get("offset_x", 0) - left
        entry["offset_y"] = entry.get("offset_y", 0) - top

    return {
        "old_size": f"{prev_w}x{prev_h}",
        "new_size": f"{cropped_w}x{cropped_h}",
        "crop_region": f"({left},{top})-({right},{bottom})",
    }
|
|
138
|
-
|
|
139
|
-
|
|
140
|
-
def set_mode(project: Dict[str, Any], mode: str) -> Dict[str, Any]:
    """Set the canvas color mode (case-insensitive input).

    Raises:
        ValueError: If the uppercased mode is not in VALID_MODES.
    """
    normalized = mode.upper()
    if normalized not in VALID_MODES:
        raise ValueError(f"Invalid color mode: {normalized}. Valid: {list(VALID_MODES)}")
    canvas = project["canvas"]
    previous = canvas.get("color_mode", "RGB")
    canvas["color_mode"] = normalized
    return {"old_mode": previous, "new_mode": normalized}
|
|
148
|
-
|
|
149
|
-
|
|
150
|
-
def set_dpi(project: Dict[str, Any], dpi: int) -> Dict[str, Any]:
    """Set the canvas DPI (dots per inch).

    Raises:
        ValueError: If dpi is less than 1.
    """
    if dpi < 1:
        raise ValueError(f"DPI must be positive: {dpi}")
    canvas = project["canvas"]
    # 72 is the assumed default when no DPI has been set yet.
    previous = canvas.get("dpi", 72)
    canvas["dpi"] = dpi
    return {"old_dpi": previous, "new_dpi": dpi}
|
|
157
|
-
|
|
158
|
-
|
|
159
|
-
def get_canvas_info(project: Dict[str, Any]) -> Dict[str, Any]:
    """Return a summary of the canvas: size, mode, background, DPI, area."""
    canvas = project["canvas"]
    width, height = canvas["width"], canvas["height"]
    dots_per_inch = canvas.get("dpi", 72)
    return {
        "width": width,
        "height": height,
        "color_mode": canvas.get("color_mode", "RGB"),
        "background": canvas.get("background", "#ffffff"),
        "dpi": dots_per_inch,
        # Physical print size derived from pixel size and DPI.
        "size_inches": f"{width/dots_per_inch:.2f} x {height/dots_per_inch:.2f}",
        "megapixels": f"{width * height / 1_000_000:.2f} MP",
    }
|
|
173
|
-
|
|
174
|
-
|
|
175
|
-
def _anchor_offset(
|
|
176
|
-
old_w: int, old_h: int, new_w: int, new_h: int, anchor: str
|
|
177
|
-
) -> tuple:
|
|
178
|
-
"""Calculate pixel offset for content based on anchor position."""
|
|
179
|
-
dx_map = {
|
|
180
|
-
"top-left": 0, "left": 0, "bottom-left": 0,
|
|
181
|
-
"top": (new_w - old_w) // 2, "center": (new_w - old_w) // 2,
|
|
182
|
-
"bottom": (new_w - old_w) // 2,
|
|
183
|
-
"top-right": new_w - old_w, "right": new_w - old_w,
|
|
184
|
-
"bottom-right": new_w - old_w,
|
|
185
|
-
}
|
|
186
|
-
dy_map = {
|
|
187
|
-
"top-left": 0, "top": 0, "top-right": 0,
|
|
188
|
-
"left": (new_h - old_h) // 2, "center": (new_h - old_h) // 2,
|
|
189
|
-
"right": (new_h - old_h) // 2,
|
|
190
|
-
"bottom-left": new_h - old_h, "bottom": new_h - old_h,
|
|
191
|
-
"bottom-right": new_h - old_h,
|
|
192
|
-
}
|
|
193
|
-
return dx_map.get(anchor, 0), dy_map.get(anchor, 0)
|
|
@@ -1,479 +0,0 @@
|
|
|
1
|
-
"""GIMP CLI - Export/rendering pipeline module.
|
|
2
|
-
|
|
3
|
-
This module handles the critical "rendering" step: flattening the layer stack
|
|
4
|
-
with all filters applied and exporting to various image formats.
|
|
5
|
-
"""
|
|
6
|
-
|
|
7
|
-
import os
|
|
8
|
-
from typing import Dict, Any, Optional, Tuple
|
|
9
|
-
from PIL import Image, ImageEnhance, ImageFilter, ImageOps, ImageDraw, ImageFont
|
|
10
|
-
import numpy as np
|
|
11
|
-
|
|
12
|
-
|
|
13
|
-
# Export presets
|
|
14
|
-
# Export presets: each entry maps a preset name to the Pillow save format,
# the conventional file extension, and the keyword params forwarded to
# Image.save() by render().
EXPORT_PRESETS = {
    "png": {"format": "PNG", "ext": ".png", "params": {"compress_level": 6}},
    "png-max": {"format": "PNG", "ext": ".png", "params": {"compress_level": 9}},
    # subsampling=0 disables chroma subsampling for maximum JPEG quality.
    "jpeg-high": {"format": "JPEG", "ext": ".jpg", "params": {"quality": 95, "subsampling": 0}},
    "jpeg-medium": {"format": "JPEG", "ext": ".jpg", "params": {"quality": 80}},
    "jpeg-low": {"format": "JPEG", "ext": ".jpg", "params": {"quality": 60}},
    "webp": {"format": "WEBP", "ext": ".webp", "params": {"quality": 85}},
    "webp-lossless": {"format": "WEBP", "ext": ".webp", "params": {"lossless": True}},
    "tiff": {"format": "TIFF", "ext": ".tiff", "params": {"compression": "lzw"}},
    "tiff-none": {"format": "TIFF", "ext": ".tiff", "params": {}},
    "bmp": {"format": "BMP", "ext": ".bmp", "params": {}},
    "gif": {"format": "GIF", "ext": ".gif", "params": {}},
    "pdf": {"format": "PDF", "ext": ".pdf", "params": {}},
    "ico": {"format": "ICO", "ext": ".ico", "params": {}},
}
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
def list_presets() -> list:
    """List available export presets.

    Returns:
        A list of dicts, one per EXPORT_PRESETS entry, each with keys
        "name", "format", "extension", and "params".
    """
    # Comprehension replaces the original manual append loop (same output).
    return [
        {
            "name": name,
            "format": preset["format"],
            "extension": preset["ext"],
            "params": preset["params"],
        }
        for name, preset in EXPORT_PRESETS.items()
    ]
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
def get_preset_info(name: str) -> Dict[str, Any]:
    """Get details about an export preset.

    Raises:
        ValueError: If the preset name is unknown.
    """
    if name not in EXPORT_PRESETS:
        available = list(EXPORT_PRESETS.keys())
        raise ValueError(f"Unknown preset: {name}. Available: {available}")
    preset = EXPORT_PRESETS[name]
    return {
        "name": name,
        "format": preset["format"],
        "extension": preset["ext"],
        "params": preset["params"],
    }
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
def render(
    project: Dict[str, Any],
    output_path: str,
    preset: str = "png",
    overwrite: bool = False,
    quality: Optional[int] = None,
    format_override: Optional[str] = None,
) -> Dict[str, Any]:
    """Render the project: flatten layers, apply filters, export.

    This is the main rendering pipeline: build a background canvas,
    composite visible layers bottom-to-top (filters, scaling, opacity,
    blend mode), convert to a format-compatible mode, then save.

    Args:
        project: The project dict (canvas + layers).
        output_path: Destination file path; parent dirs are created.
        preset: Key into EXPORT_PRESETS (ignored when format_override set).
        overwrite: Allow clobbering an existing file.
        quality: Optional override for the preset's "quality" save param.
        format_override: Explicit Pillow format name; uses empty save params.

    Returns:
        Summary dict: output path, format, size, file size, preset,
        and number of visible layers rendered.

    Raises:
        FileExistsError: If output exists and overwrite is False.
        ValueError: If the preset is unknown.
    """
    if os.path.exists(output_path) and not overwrite:
        raise FileExistsError(f"Output file exists: {output_path}. Use --overwrite.")

    canvas = project["canvas"]
    cw, ch = canvas["width"], canvas["height"]
    bg_color = canvas.get("background", "#ffffff")
    mode = canvas.get("color_mode", "RGB")

    # Determine output format. A format override bypasses preset params
    # entirely (only an explicit `quality` is re-applied below).
    if format_override:
        fmt = format_override.upper()
        save_params = {}
    elif preset in EXPORT_PRESETS:
        p = EXPORT_PRESETS[preset]
        fmt = p["format"]
        # Copy so the preset's shared params dict is never mutated.
        save_params = dict(p["params"])
    else:
        raise ValueError(f"Unknown preset: {preset}")

    if quality is not None:
        save_params["quality"] = quality

    # Create the base canvas. Alpha-capable modes start transparent.
    if mode in ("RGBA", "LA"):
        canvas_img = Image.new("RGBA", (cw, ch), (0, 0, 0, 0))
        # Draw background if not transparent
        if bg_color.lower() != "transparent":
            bg = Image.new("RGBA", (cw, ch), bg_color)
            canvas_img = Image.alpha_composite(canvas_img, bg)
    else:
        canvas_img = Image.new("RGB", (cw, ch), bg_color)

    layers = project.get("layers", [])

    # Composite layers bottom-to-top; the list is ordered top-first,
    # hence reversed().
    for layer in reversed(layers):
        if not layer.get("visible", True):
            continue

        layer_img = _load_layer(layer, cw, ch)
        if layer_img is None:
            continue

        # Apply the layer's filter chain before any geometric transform.
        layer_img = _apply_filters(layer_img, layer.get("filters", []))

        # Apply deferred scaling tagged by scale_canvas().
        if "_scale_x" in layer:
            new_w = max(1, round(layer_img.width * layer["_scale_x"]))
            new_h = max(1, round(layer_img.height * layer["_scale_y"]))
            resample_map = {
                "nearest": Image.NEAREST, "bilinear": Image.BILINEAR,
                "bicubic": Image.BICUBIC, "lanczos": Image.LANCZOS,
            }
            resample = resample_map.get(layer.get("_resample", "lanczos"), Image.LANCZOS)
            layer_img = layer_img.resize((new_w, new_h), resample)

        # Position on canvas
        ox = layer.get("offset_x", 0)
        oy = layer.get("offset_y", 0)

        # Layer opacity in [0, 1]; 1.0 means fully opaque.
        opacity = layer.get("opacity", 1.0)

        # Apply blend mode and composite
        canvas_img = _composite_layer(
            canvas_img, layer_img, ox, oy, opacity,
            layer.get("blend_mode", "normal")
        )

    # Convert mode for export; JPEG has no alpha, GIF needs a palette.
    if fmt == "JPEG":
        if canvas_img.mode == "RGBA":
            # Flatten alpha onto white background for JPEG
            bg = Image.new("RGB", canvas_img.size, (255, 255, 255))
            bg.paste(canvas_img, mask=canvas_img.split()[3])
            canvas_img = bg
        elif canvas_img.mode != "RGB":
            canvas_img = canvas_img.convert("RGB")
    elif fmt == "GIF":
        canvas_img = canvas_img.convert("P", palette=Image.ADAPTIVE, colors=256)

    # Embed the project DPI in the output file's metadata.
    dpi = canvas.get("dpi", 72)
    save_params["dpi"] = (dpi, dpi)

    # Save (creating parent directories as needed).
    os.makedirs(os.path.dirname(os.path.abspath(output_path)), exist_ok=True)
    canvas_img.save(output_path, format=fmt, **save_params)

    # Summarize the written output for the caller.
    result = {
        "output": os.path.abspath(output_path),
        "format": fmt,
        "size": f"{canvas_img.width}x{canvas_img.height}",
        "file_size": os.path.getsize(output_path),
        "file_size_human": _human_size(os.path.getsize(output_path)),
        "preset": preset,
        "layers_rendered": sum(1 for l in layers if l.get("visible", True)),
    }

    return result
|
|
166
|
-
|
|
167
|
-
|
|
168
|
-
def _load_layer(layer: Dict[str, Any], canvas_w: int, canvas_h: int) -> Optional[Image.Image]:
    """Load a layer's content as a PIL Image.

    Dispatches on layer["type"]: "image" (file source or solid/transparent
    fill), "solid" (color fill), or "text" (rasterized via Pillow). Returns
    None for unknown layer types so render() can skip them.
    """
    layer_type = layer.get("type", "image")

    if layer_type == "image":
        source = layer.get("source")
        # A readable source file wins; otherwise fall back to a fill.
        if source and os.path.exists(source):
            img = Image.open(source).convert("RGBA")
            return img
        # Blank layer with fill; size defaults to the full canvas.
        fill = layer.get("fill", "transparent")
        w = layer.get("width", canvas_w)
        h = layer.get("height", canvas_h)
        if fill == "transparent":
            return Image.new("RGBA", (w, h), (0, 0, 0, 0))
        elif fill == "white":
            return Image.new("RGBA", (w, h), (255, 255, 255, 255))
        elif fill == "black":
            return Image.new("RGBA", (w, h), (0, 0, 0, 255))
        else:
            # Any other value is passed straight to Pillow (e.g. "#rrggbb").
            return Image.new("RGBA", (w, h), fill)

    elif layer_type == "solid":
        fill = layer.get("fill", "#ffffff")
        w = layer.get("width", canvas_w)
        h = layer.get("height", canvas_h)
        return Image.new("RGBA", (w, h), fill)

    elif layer_type == "text":
        return _render_text_layer(layer, canvas_w, canvas_h)

    # Unknown layer type: caller skips it.
    return None
|
|
200
|
-
|
|
201
|
-
|
|
202
|
-
def _render_text_layer(layer: Dict[str, Any], canvas_w: int, canvas_h: int) -> Image.Image:
    """Render a text layer to a transparent RGBA image.

    Reads "text", "font_size", and "color" from the layer; the image size
    defaults to the full canvas. Text is drawn at the top-left corner.
    """
    text = layer.get("text", "")
    font_size = layer.get("font_size", 24)
    color = layer.get("color", "#000000")
    w = layer.get("width", canvas_w)
    h = layer.get("height", canvas_h)

    img = Image.new("RGBA", (w, h), (0, 0, 0, 0))
    draw = ImageDraw.Draw(img)

    # Font fallback chain: Linux DejaVu path -> Arial (Windows/macOS by
    # font name) -> Pillow's built-in bitmap font (ignores font_size).
    try:
        font = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf", font_size)
    except (OSError, IOError):
        try:
            font = ImageFont.truetype("arial.ttf", font_size)
        except (OSError, IOError):
            font = ImageFont.load_default()

    draw.text((0, 0), text, fill=color, font=font)
    return img
|
|
223
|
-
|
|
224
|
-
|
|
225
|
-
def _apply_filters(img: Image.Image, filters: list) -> Image.Image:
    """Apply a chain of filters to an image, in list order."""
    for entry in filters:
        img = _apply_single_filter(img, entry["name"], entry.get("params", {}))
    return img
|
|
232
|
-
|
|
233
|
-
|
|
234
|
-
def _apply_single_filter(img: Image.Image, name: str, params: Dict) -> Image.Image:
    """Apply a single named filter to an image.

    Looks the filter up in FILTER_REGISTRY and dispatches on its "engine":
    "pillow_enhance" (ImageEnhance classes), "pillow_ops" (ImageOps
    functions), "pillow_filter" (ImageFilter kernels), "pillow_transform"
    (geometric transforms), or "custom". Unknown filter names and unknown
    engines return the image unchanged. RGBA inputs have their alpha
    channel split off, preserved, and re-attached where the underlying
    Pillow operation only works on RGB.
    """
    # Local import avoids a circular import between export and filters.
    from cli_anything.gimp.core.filters import FILTER_REGISTRY

    if name not in FILTER_REGISTRY:
        return img  # Skip unknown filters

    spec = FILTER_REGISTRY[name]
    engine = spec["engine"]

    # Remember whether alpha must be preserved through RGB-only operations.
    original_mode = img.mode
    needs_rgba = original_mode == "RGBA"

    if engine == "pillow_enhance":
        cls_name = spec["pillow_class"]
        factor = params.get("factor", 1.0)
        # ImageEnhance needs RGB, handle RGBA by separating alpha
        if needs_rgba:
            alpha = img.split()[3]
            rgb = img.convert("RGB")
            enhancer = getattr(ImageEnhance, cls_name)(rgb)
            result = enhancer.enhance(factor).convert("RGBA")
            result.putalpha(alpha)
            return result
        else:
            enhancer = getattr(ImageEnhance, cls_name)(img)
            return enhancer.enhance(factor)

    elif engine == "pillow_ops":
        func_name = spec["pillow_func"]
        if needs_rgba:
            alpha = img.split()[3]
            rgb = img.convert("RGB")
        else:
            rgb = img

        if func_name == "autocontrast":
            result = ImageOps.autocontrast(rgb, cutoff=params.get("cutoff", 0))
        elif func_name == "equalize":
            result = ImageOps.equalize(rgb)
        elif func_name == "invert":
            result = ImageOps.invert(rgb)
        elif func_name == "posterize":
            result = ImageOps.posterize(rgb, bits=params.get("bits", 4))
        elif func_name == "solarize":
            result = ImageOps.solarize(rgb, threshold=params.get("threshold", 128))
        elif func_name == "grayscale":
            # grayscale returns mode "L", so alpha re-attachment happens
            # inside this branch before returning.
            result = ImageOps.grayscale(rgb)
            if needs_rgba:
                result = result.convert("RGBA")
                result.putalpha(alpha)
                return result
            return result
        else:
            return img

        # Shared alpha re-attachment for the non-grayscale ops above.
        if needs_rgba:
            result = result.convert("RGBA")
            result.putalpha(alpha)
        return result

    elif engine == "pillow_filter":
        filter_name = spec["pillow_filter"]
        if filter_name == "GaussianBlur":
            pf = ImageFilter.GaussianBlur(radius=params.get("radius", 2.0))
        elif filter_name == "BoxBlur":
            pf = ImageFilter.BoxBlur(radius=params.get("radius", 2.0))
        elif filter_name == "UnsharpMask":
            pf = ImageFilter.UnsharpMask(
                radius=params.get("radius", 2.0),
                percent=params.get("percent", 150),
                threshold=params.get("threshold", 3),
            )
        elif filter_name == "SMOOTH_MORE":
            pf = ImageFilter.SMOOTH_MORE
        elif filter_name == "FIND_EDGES":
            pf = ImageFilter.FIND_EDGES
        elif filter_name == "EMBOSS":
            pf = ImageFilter.EMBOSS
        elif filter_name == "CONTOUR":
            pf = ImageFilter.CONTOUR
        elif filter_name == "DETAIL":
            pf = ImageFilter.DETAIL
        else:
            return img
        return img.filter(pf)

    elif engine == "pillow_transform":
        method = spec["pillow_method"]
        if method == "rotate":
            angle = params.get("angle", 0.0)
            expand = params.get("expand", True)
            # Negated angle: Pillow rotates counter-clockwise, the CLI
            # convention here is clockwise.
            return img.rotate(-angle, expand=expand, resample=Image.BICUBIC)
        elif method == "flip_h":
            return img.transpose(Image.FLIP_LEFT_RIGHT)
        elif method == "flip_v":
            return img.transpose(Image.FLIP_TOP_BOTTOM)
        elif method == "resize":
            w = params.get("width", img.width)
            h = params.get("height", img.height)
            resample_map = {
                "nearest": Image.NEAREST, "bilinear": Image.BILINEAR,
                "bicubic": Image.BICUBIC, "lanczos": Image.LANCZOS,
            }
            rs = resample_map.get(params.get("resample", "lanczos"), Image.LANCZOS)
            return img.resize((w, h), rs)
        elif method == "crop":
            left = params.get("left", 0)
            top = params.get("top", 0)
            right = params.get("right", img.width)
            bottom = params.get("bottom", img.height)
            return img.crop((left, top, right, bottom))

    elif engine == "custom":
        func_name = spec["custom_func"]
        if func_name == "apply_sepia":
            return _apply_sepia(img, params.get("strength", 0.8))

    # Unknown engine/method: leave the image untouched.
    return img
|
|
354
|
-
|
|
355
|
-
|
|
356
|
-
def _apply_sepia(img: Image.Image, strength: float = 0.8) -> Image.Image:
    """Apply sepia tone effect.

    The image is grayscaled and colorized between a dark brown and a light
    tan; `strength` < 1.0 blends the sepia result back with the original
    (1.0 = fully sepia). RGBA alpha is preserved.
    """
    needs_rgba = img.mode == "RGBA"
    if needs_rgba:
        alpha = img.split()[3]

    gray = ImageOps.grayscale(img)
    # Map black->dark brown, white->light tan.
    sepia = ImageOps.colorize(gray, "#704214", "#C0A080")

    if strength < 1.0:
        # Blend with original
        rgb = img.convert("RGB")
        from PIL import Image as PILImage
        sepia = PILImage.blend(rgb, sepia, strength)

    if needs_rgba:
        sepia = sepia.convert("RGBA")
        sepia.putalpha(alpha)

    return sepia
|
|
376
|
-
|
|
377
|
-
|
|
378
|
-
def _composite_layer(
    base: Image.Image,
    layer: Image.Image,
    offset_x: int,
    offset_y: int,
    opacity: float,
    blend_mode: str,
) -> Image.Image:
    """Composite a layer onto the base canvas with blend mode and opacity.

    The layer is pasted into a canvas-sized transparent buffer at
    (offset_x, offset_y), then combined with the base: plain alpha
    compositing for "normal", otherwise _blend_with_mode's pixel math.
    """
    # Ensure both are RGBA for compositing
    if base.mode != "RGBA":
        base = base.convert("RGBA")
    if layer.mode != "RGBA":
        layer = layer.convert("RGBA")

    # Apply opacity by scaling the layer's alpha channel in place.
    if opacity < 1.0:
        alpha = layer.split()[3]
        alpha = alpha.point(lambda a: int(a * opacity))
        layer.putalpha(alpha)

    # Create a full-canvas-sized version of the layer at the correct offset
    layer_canvas = Image.new("RGBA", base.size, (0, 0, 0, 0))
    layer_canvas.paste(layer, (offset_x, offset_y))

    if blend_mode == "normal":
        return Image.alpha_composite(base, layer_canvas)

    # For other blend modes, we need numpy
    try:
        return _blend_with_mode(base, layer_canvas, blend_mode)
    except ImportError:
        # NOTE(review): numpy is imported at module top, so an ImportError
        # here looks unreachable — presumably a leftover from a lazy-import
        # version of _blend_with_mode; confirm before removing.
        return Image.alpha_composite(base, layer_canvas)
|
|
412
|
-
|
|
413
|
-
|
|
414
|
-
def _blend_with_mode(base: Image.Image, layer: Image.Image, mode: str) -> Image.Image:
    """Apply blend mode using numpy pixel math.

    Both images must be RGBA and the same size. RGB channels are blended
    per `mode` in normalized [0, 1] space, then composited over the base
    using the layer's alpha; the result alpha is standard "over" alpha.
    Unknown modes fall back to the layer's own color (normal blending).
    """
    base_arr = np.array(base, dtype=np.float64)
    layer_arr = np.array(layer, dtype=np.float64)

    # Extract channels, normalized to [0, 1]; alpha kept as (H, W, 1)
    # so it broadcasts against the RGB planes.
    b_rgb = base_arr[:, :, :3] / 255.0
    l_rgb = layer_arr[:, :, :3] / 255.0
    b_alpha = base_arr[:, :, 3:4] / 255.0
    l_alpha = layer_arr[:, :, 3:4] / 255.0

    # Apply blend formula to RGB channels
    if mode == "multiply":
        blended = b_rgb * l_rgb
    elif mode == "screen":
        blended = 1.0 - (1.0 - b_rgb) * (1.0 - l_rgb)
    elif mode == "overlay":
        # Overlay: multiply in shadows, screen in highlights (base decides).
        mask = b_rgb < 0.5
        blended = np.where(mask, 2 * b_rgb * l_rgb, 1 - 2 * (1 - b_rgb) * (1 - l_rgb))
    elif mode == "soft_light":
        blended = np.where(
            l_rgb <= 0.5,
            b_rgb - (1 - 2 * l_rgb) * b_rgb * (1 - b_rgb),
            b_rgb + (2 * l_rgb - 1) * (np.sqrt(b_rgb) - b_rgb),
        )
    elif mode == "hard_light":
        # Hard light is overlay with the layer deciding the branch.
        mask = l_rgb < 0.5
        blended = np.where(mask, 2 * b_rgb * l_rgb, 1 - 2 * (1 - b_rgb) * (1 - l_rgb))
    elif mode == "difference":
        blended = np.abs(b_rgb - l_rgb)
    elif mode == "darken":
        blended = np.minimum(b_rgb, l_rgb)
    elif mode == "lighten":
        blended = np.maximum(b_rgb, l_rgb)
    elif mode == "color_dodge":
        # Small epsilon guards against division by zero at l_rgb == 1.
        blended = np.clip(b_rgb / (1.0 - l_rgb + 1e-10), 0, 1)
    elif mode == "color_burn":
        blended = np.clip(1.0 - (1.0 - b_rgb) / (l_rgb + 1e-10), 0, 1)
    elif mode == "addition":
        blended = np.clip(b_rgb + l_rgb, 0, 1)
    elif mode == "subtract":
        blended = np.clip(b_rgb - l_rgb, 0, 1)
    elif mode == "grain_merge":
        blended = np.clip(b_rgb + l_rgb - 0.5, 0, 1)
    elif mode == "grain_extract":
        blended = np.clip(b_rgb - l_rgb + 0.5, 0, 1)
    else:
        blended = l_rgb  # Fallback to normal

    # Composite: result = blended * layer_alpha + base * (1 - layer_alpha)
    result_rgb = blended * l_alpha + b_rgb * (1.0 - l_alpha)
    result_alpha = np.clip(b_alpha + l_alpha * (1.0 - b_alpha), 0, 1)

    result = np.concatenate([result_rgb, result_alpha], axis=2)
    result = np.clip(result * 255, 0, 255).astype(np.uint8)

    return Image.fromarray(result, "RGBA")
|
|
471
|
-
|
|
472
|
-
|
|
473
|
-
def _human_size(nbytes: int) -> str:
|
|
474
|
-
"""Convert byte count to human-readable string."""
|
|
475
|
-
for unit in ("B", "KB", "MB", "GB"):
|
|
476
|
-
if nbytes < 1024:
|
|
477
|
-
return f"{nbytes:.1f} {unit}"
|
|
478
|
-
nbytes /= 1024
|
|
479
|
-
return f"{nbytes:.1f} TB"
|