background-remove 0.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +317 -0
- package/bin/cli.js +561 -0
- package/package.json +52 -0
- package/src/index.js +778 -0
package/src/index.js
ADDED
|
@@ -0,0 +1,778 @@
|
|
|
1
|
+
const sharp = require('sharp');
|
|
2
|
+
const path = require('path');
|
|
3
|
+
|
|
4
|
+
/**
 * Parse a color string into an [r, g, b] array.
 *
 * Accepted formats:
 *   - Named colors: white, black, red, green, blue, yellow, cyan, magenta, gray/grey
 *   - "rgb(r, g, b)" with decimal components
 *   - Hex: "#rgb", "rgb", "#rrggbb", "rrggbb"
 *
 * @param {string} colorStr - Color specification to parse.
 * @returns {number[]} [r, g, b] components (0-255).
 * @throws {Error} If the string matches none of the supported formats.
 */
function parseColor(colorStr) {
  // Named colors
  const namedColors = {
    white: [255, 255, 255],
    black: [0, 0, 0],
    red: [255, 0, 0],
    green: [0, 255, 0],
    blue: [0, 0, 255],
    yellow: [255, 255, 0],
    cyan: [0, 255, 255],
    magenta: [255, 0, 255],
    gray: [128, 128, 128],
    grey: [128, 128, 128],
  };

  const lower = colorStr.toLowerCase();
  if (namedColors[lower]) {
    return namedColors[lower];
  }

  // RGB(r,g,b) format — always pass an explicit radix to parseInt
  const rgbMatch = colorStr.match(/rgb\((\d+),\s*(\d+),\s*(\d+)\)/i);
  if (rgbMatch) {
    return [
      parseInt(rgbMatch[1], 10),
      parseInt(rgbMatch[2], 10),
      parseInt(rgbMatch[3], 10),
    ];
  }

  // Hex format: expand #rgb shorthand, then require exactly 6 hex digits.
  // (Previously a string like "zzzzzz" would silently parse to [NaN, NaN, NaN]
  // instead of throwing.)
  let hex = colorStr.replace('#', '');
  if (hex.length === 3) {
    hex = hex.split('').map(c => c + c).join('');
  }
  if (/^[0-9a-f]{6}$/i.test(hex)) {
    return [
      parseInt(hex.substring(0, 2), 16),
      parseInt(hex.substring(2, 4), 16),
      parseInt(hex.substring(4, 6), 16),
    ];
  }

  throw new Error(`Unable to parse color: ${colorStr}`);
}
|
|
48
|
+
|
|
49
|
+
/**
 * Euclidean distance between two colors in RGB space.
 *
 * @param {number} r1 - Red of the first color.
 * @param {number} g1 - Green of the first color.
 * @param {number} b1 - Blue of the first color.
 * @param {number} r2 - Red of the second color.
 * @param {number} g2 - Green of the second color.
 * @param {number} b2 - Blue of the second color.
 * @returns {number} Distance (0 means identical colors).
 */
function colorDistance(r1, g1, b1, r2, g2, b2) {
  const dr = r1 - r2;
  const dg = g1 - g2;
  const db = b1 - b2;
  return Math.sqrt(dr * dr + dg * dg + db * db);
}
|
|
59
|
+
|
|
60
|
+
/**
 * Method: Explicit Color - make every pixel within `options.tolerance` of
 * `options.color` fully transparent (or every pixel OUTSIDE that range when
 * `options.invert` is set).
 *
 * @param {Buffer} imageBuffer - Raw interleaved pixel data.
 * @param {number} width - Image width in pixels.
 * @param {number} height - Image height in pixels.
 * @param {number} channels - Bytes per pixel (alpha expected at offset 3).
 * @param {Object} options - { color, tolerance, invert }.
 * @returns {Promise<{data: Buffer, detectedColor: number[]}>}
 */
async function explicitColorMethod(imageBuffer, width, height, channels, options) {
  const targetColor = parseColor(options.color);
  const { tolerance, invert } = options;
  const [tr, tg, tb] = targetColor;

  // Work on a copy so the caller's buffer stays untouched.
  const pixels = Buffer.from(imageBuffer);

  for (let offset = 0; offset < pixels.length; offset += channels) {
    const dist = colorDistance(
      pixels[offset], pixels[offset + 1], pixels[offset + 2],
      tr, tg, tb
    );
    const matchesTarget = dist <= tolerance;

    if (invert ? !matchesTarget : matchesTarget) {
      pixels[offset + 3] = 0; // fully transparent
    }
  }

  return { data: pixels, detectedColor: targetColor };
}
|
|
86
|
+
|
|
87
|
+
/**
 * Method: Inferred Color - estimate the background color automatically by
 * sampling square pixel patches anchored at the four corners and the four
 * edge midpoints, then remove every pixel within `options.tolerance` of the
 * inferred color (or outside it when `options.invert` is set).
 *
 * @param {Buffer} imageBuffer - Raw interleaved pixel data.
 * @param {number} width - Image width in pixels.
 * @param {number} height - Image height in pixels.
 * @param {number} channels - Bytes per pixel (alpha expected at offset 3).
 * @param {Object} options - { tolerance, invert, radius }; radius is the
 *   sampled patch size and defaults to 10.
 * @returns {Promise<{data: Buffer, detectedColor: number[]}>}
 */
async function inferredColorMethod(imageBuffer, width, height, channels, options) {
  const pixels = Buffer.from(imageBuffer);
  const patchSize = options.radius || 10;
  const { tolerance, invert } = options;

  // Patch anchors: corners plus edge midpoints; patches extend right/down.
  const anchors = [
    { x: 0, y: 0 },
    { x: width - 1, y: 0 },
    { x: 0, y: height - 1 },
    { x: width - 1, y: height - 1 },
    { x: Math.floor(width / 2), y: 0 },
    { x: Math.floor(width / 2), y: height - 1 },
    { x: 0, y: Math.floor(height / 2) },
    { x: width - 1, y: Math.floor(height / 2) },
  ];

  const samples = [];
  for (const anchor of anchors) {
    for (let dy = 0; dy < patchSize && anchor.y + dy < height; dy++) {
      for (let dx = 0; dx < patchSize && anchor.x + dx < width; dx++) {
        const idx = ((anchor.y + dy) * width + (anchor.x + dx)) * channels;
        if (idx < pixels.length - 2) {
          samples.push([pixels[idx], pixels[idx + 1], pixels[idx + 2]]);
        }
      }
    }
  }

  // Pick, among the first 20 samples, the one with the smallest total
  // squared distance to all samples — a cheap stand-in for clustering.
  let bgColor = samples[0];
  let bestScore = Infinity;
  const candidateCount = Math.min(20, samples.length);

  for (let i = 0; i < candidateCount; i++) {
    let score = 0;
    for (const sample of samples) {
      score += Math.pow(colorDistance(...samples[i], ...sample), 2);
    }
    if (score < bestScore) {
      bestScore = score;
      bgColor = samples[i];
    }
  }

  // Clear the alpha of every pixel close enough to the inferred background.
  for (let offset = 0; offset < pixels.length; offset += channels) {
    const dist = colorDistance(
      pixels[offset], pixels[offset + 1], pixels[offset + 2],
      bgColor[0], bgColor[1], bgColor[2]
    );
    const isBackground = dist <= tolerance;

    if (invert ? !isBackground : isBackground) {
      pixels[offset + 3] = 0;
    }
  }

  return { data: pixels, detectedColor: bgColor };
}
|
|
154
|
+
|
|
155
|
+
/**
 * Method: Chroma Key - remove a green/blue/red screen background.
 * A pixel is removed when it falls inside the per-channel range of the
 * chosen chroma preset AND lies within `tolerance + 40` of the preset's
 * primary color. `options.chromaColor` may also be any color string that
 * parseColor understands; a +/-80 per-channel range is then derived from it.
 *
 * @param {Buffer} imageBuffer - Raw interleaved pixel data.
 * @param {number} width - Image width in pixels.
 * @param {number} height - Image height in pixels.
 * @param {number} channels - Bytes per pixel (alpha expected at offset 3).
 * @param {Object} options - { chromaColor, tolerance, invert }.
 * @returns {Promise<{data: Buffer, detectedColor: number[]}>}
 */
async function chromaKeyMethod(imageBuffer, width, height, channels, options) {
  const keyName = options.chromaColor || 'green';
  const { tolerance, invert } = options;

  const pixels = Buffer.from(imageBuffer);

  // Preset screens: primary color plus an allowed [min, max] per channel.
  const presets = {
    green: { primary: [0, 255, 0], range: [[0, 100], [150, 255], [0, 100]] },
    blue: { primary: [0, 0, 255], range: [[0, 100], [0, 100], [150, 255]] },
    red: { primary: [255, 0, 0], range: [[150, 255], [0, 100], [0, 100]] },
  };

  let chroma = presets[keyName.toLowerCase()];

  if (!chroma) {
    // Not a preset: treat it as a custom color with a +/-80 band per channel.
    const custom = parseColor(keyName);
    const band = (value) => [Math.max(0, value - 80), Math.min(255, value + 80)];
    chroma = {
      primary: custom,
      range: [band(custom[0]), band(custom[1]), band(custom[2])],
    };
  }

  const [[rMin, rMax], [gMin, gMax], [bMin, bMax]] = chroma.range;

  for (let offset = 0; offset < pixels.length; offset += channels) {
    const r = pixels[offset];
    const g = pixels[offset + 1];
    const b = pixels[offset + 2];

    const inRange =
      r >= rMin && r <= rMax &&
      g >= gMin && g <= gMax &&
      b >= bMin && b <= bMax;

    // The range check alone is loose; also require proximity to the primary.
    const distanceToPrimary = colorDistance(r, g, b, ...chroma.primary);
    const isChroma = inRange && distanceToPrimary <= tolerance + 40;

    if (invert ? !isChroma : isChroma) {
      pixels[offset + 3] = 0;
    }
  }

  return { data: pixels, detectedColor: chroma.primary };
}
|
|
210
|
+
|
|
211
|
+
/**
 * Method: Flood Fill - remove regions of similar color connected (4-way) to
 * one or more seed points.
 *
 * Fix: the BFS previously used Array.prototype.shift(), which is O(n) per
 * call and made the fill quadratic on large background regions. A moving
 * head index preserves the exact visit order at O(1) per dequeue.
 *
 * @param {Buffer} imageBuffer - Raw interleaved pixel data.
 * @param {number} width - Image width in pixels.
 * @param {number} height - Image height in pixels.
 * @param {number} channels - Bytes per pixel (alpha expected at offset 3).
 * @param {Object} options - { tolerance, invert, floodSeed } where floodSeed
 *   is an array of "x,y" strings (defaults to ['0,0']).
 * @returns {Promise<{data: Buffer, detectedColor: string[]}>} detectedColor
 *   holds the "r,g,b" string sampled at each seed.
 */
async function floodFillMethod(imageBuffer, width, height, channels, options) {
  const tolerance = options.tolerance;
  const seeds = options.floodSeed || ['0,0'];
  const invert = options.invert;

  const data = Buffer.from(imageBuffer);
  const visited = new Uint8Array(width * height);
  const toRemove = new Uint8Array(width * height);

  // Parse "x,y" seed strings, clamping to the image bounds.
  const seedPositions = seeds.map(s => {
    const [x, y] = s.split(',').map(Number);
    return { x: Math.max(0, Math.min(x, width - 1)), y: Math.max(0, Math.min(y, height - 1)) };
  });

  // Get pixel color at position
  const getColor = (x, y) => {
    const idx = (y * width + x) * channels;
    return [data[idx], data[idx + 1], data[idx + 2]];
  };

  // BFS from each seed, flagging connected pixels within tolerance.
  for (const seed of seedPositions) {
    const seedColor = getColor(seed.x, seed.y);
    const queue = [seed];
    visited[seed.y * width + seed.x] = 1;

    // `head` replaces shift(): same FIFO order, no array re-indexing.
    for (let head = 0; head < queue.length; head++) {
      const { x, y } = queue[head];
      const idx = y * width + x;
      const currentColor = getColor(x, y);

      const distance = colorDistance(...currentColor, ...seedColor);

      if (distance <= tolerance) {
        toRemove[idx] = 1;

        // Enqueue unvisited 4-neighbors.
        const neighbors = [
          { x: x + 1, y },
          { x: x - 1, y },
          { x, y: y + 1 },
          { x, y: y - 1 },
        ];

        for (const n of neighbors) {
          if (n.x >= 0 && n.x < width && n.y >= 0 && n.y < height) {
            const nIdx = n.y * width + n.x;
            if (!visited[nIdx]) {
              visited[nIdx] = 1;
              queue.push(n);
            }
          }
        }
      }
    }
  }

  // Apply removal: clear alpha on every flagged pixel (or the complement
  // when invert is set).
  for (let y = 0; y < height; y++) {
    for (let x = 0; x < width; x++) {
      const idx = y * width + x;
      const pixelIdx = idx * channels;
      const shouldRemove = invert ? !toRemove[idx] : toRemove[idx];

      if (shouldRemove) {
        data[pixelIdx + 3] = 0;
      }
    }
  }

  return { data, detectedColor: seedPositions.map(s => getColor(s.x, s.y).join(',')) };
}
|
|
288
|
+
|
|
289
|
+
/**
 * Method: Edge Detection + Flood Fill - mark strong-gradient pixels as
 * "edges", then flood-fill from the four corners, removing background pixels
 * until an edge (or a color change beyond tolerance) is reached.
 *
 * Fixes: removed a dead local (`idx` computed but unused in the gradient
 * pass) and replaced Array.prototype.shift() — O(n) per call, quadratic on
 * large regions — with a moving head index that preserves the visit order.
 *
 * @param {Buffer} imageBuffer - Raw interleaved pixel data.
 * @param {number} width - Image width in pixels.
 * @param {number} height - Image height in pixels.
 * @param {number} channels - Bytes per pixel (alpha expected at offset 3).
 * @param {Object} options - { tolerance, invert, distance }; `distance`
 *   (default 10) scales the edge threshold: gradient > distance * 2.
 * @returns {Promise<{data: Buffer, detectedColor: null}>}
 */
async function edgesMethod(imageBuffer, width, height, channels, options) {
  const tolerance = options.tolerance;
  const distance = options.distance || 10;
  const invert = options.invert;

  const data = Buffer.from(imageBuffer);

  // Pass 1: gradient-magnitude edge map (central differences, RGB mean).
  // The one-pixel border is never marked as an edge.
  const edges = new Uint8Array(width * height);

  for (let y = 1; y < height - 1; y++) {
    for (let x = 1; x < width - 1; x++) {
      const left = (y * width + (x - 1)) * channels;
      const right = (y * width + (x + 1)) * channels;
      const up = ((y - 1) * width + x) * channels;
      const down = ((y + 1) * width + x) * channels;

      const dx = (
        Math.abs(data[left] - data[right]) +
        Math.abs(data[left + 1] - data[right + 1]) +
        Math.abs(data[left + 2] - data[right + 2])
      ) / 3;

      const dy = (
        Math.abs(data[up] - data[down]) +
        Math.abs(data[up + 1] - data[down + 1]) +
        Math.abs(data[up + 2] - data[down + 2])
      ) / 3;

      const gradient = Math.sqrt(dx * dx + dy * dy);

      if (gradient > distance * 2) {
        edges[y * width + x] = 1;
      }
    }
  }

  // Pass 2: flood from the corners, respecting edges.
  const visited = new Uint8Array(width * height);
  const toRemove = new Uint8Array(width * height);

  const corners = [
    { x: 0, y: 0 },
    { x: width - 1, y: 0 },
    { x: 0, y: height - 1 },
    { x: width - 1, y: height - 1 },
  ];

  const getColor = (x, y) => {
    const idx = (y * width + x) * channels;
    return [data[idx], data[idx + 1], data[idx + 2]];
  };

  for (const corner of corners) {
    const cornerColor = getColor(corner.x, corner.y);
    const queue = [corner];
    visited[corner.y * width + corner.x] = 1;

    // `head` replaces shift(): same FIFO order, O(1) per dequeue.
    for (let head = 0; head < queue.length; head++) {
      const { x, y } = queue[head];
      const idx = y * width + x;
      const currentColor = getColor(x, y);

      const colorDist = colorDistance(...currentColor, ...cornerColor);

      if (colorDist <= tolerance && !edges[idx]) {
        toRemove[idx] = 1;

        const neighbors = [
          { x: x + 1, y },
          { x: x - 1, y },
          { x, y: y + 1 },
          { x, y: y - 1 },
        ];

        for (const n of neighbors) {
          if (n.x >= 0 && n.x < width && n.y >= 0 && n.y < height) {
            const nIdx = n.y * width + n.x;
            if (!visited[nIdx]) {
              visited[nIdx] = 1;
              queue.push(n);
            }
          }
        }
      }
    }
  }

  // Pass 3: clear alpha on flagged pixels (or the complement when invert).
  for (let y = 0; y < height; y++) {
    for (let x = 0; x < width; x++) {
      const idx = y * width + x;
      const pixelIdx = idx * channels;
      const shouldRemove = invert ? !toRemove[idx] : toRemove[idx];

      if (shouldRemove) {
        data[pixelIdx + 3] = 0;
      }
    }
  }

  return { data, detectedColor: null };
}
|
|
399
|
+
|
|
400
|
+
/**
 * Method: Auto - delegate to the inferred-color strategy, which covers the
 * most common case (a roughly uniform background touching the image edges).
 *
 * @param {Buffer} imageBuffer - Raw interleaved pixel data.
 * @param {number} width - Image width in pixels.
 * @param {number} height - Image height in pixels.
 * @param {number} channels - Bytes per pixel.
 * @param {Object} options - Passed through to inferredColorMethod.
 * @returns {Promise<{data: Buffer, detectedColor: number[]}>}
 */
async function autoMethod(imageBuffer, width, height, channels, options) {
  // Inferred color detection is the general-purpose default strategy.
  const result = await inferredColorMethod(imageBuffer, width, height, channels, options);
  return result;
}
|
|
407
|
+
|
|
408
|
+
/**
 * Apply optional post-processing passes to the pixel data, in order:
 * feathering (alpha blur), edge smoothing, then antialiasing.
 * Passes that are not requested in `options` are skipped.
 *
 * @param {Buffer} data - Raw interleaved pixel data.
 * @param {number} width - Image width in pixels.
 * @param {number} height - Image height in pixels.
 * @param {number} channels - Bytes per pixel.
 * @param {Object} options - { feather, smooth, antialias }.
 * @returns {Promise<Buffer>} The processed pixel data.
 */
async function postProcess(data, width, height, channels, options) {
  let processed = data;

  // Each enabled pass consumes the previous pass's output.
  if (options.feather > 0) {
    processed = await applyFeather(processed, width, height, channels, options.feather);
  }

  if (options.smooth) {
    processed = await applySmoothing(processed, width, height, channels);
  }

  if (options.antialias) {
    processed = await applyAntialias(processed, width, height, channels);
  }

  return processed;
}
|
|
430
|
+
|
|
431
|
+
/**
 * Feather (soften) alpha edges by blurring the alpha channel with three
 * passes of a box blur of radius ceil(amount) — a cheap approximation of a
 * Gaussian blur. Color channels are left untouched; the input buffer is not
 * modified.
 *
 * Fixes: removed dead locals (`kernelSize` and `sigma` were computed but
 * never used) and hoisted the loop-invariant `Math.ceil(amount)` out of the
 * per-pixel loops.
 *
 * @param {Buffer} data - Raw interleaved pixel data (alpha at offset 3).
 * @param {number} width - Image width in pixels.
 * @param {number} height - Image height in pixels.
 * @param {number} channels - Bytes per pixel.
 * @param {number} amount - Blur radius in pixels; larger = softer edges.
 * @returns {Promise<Buffer>} A new buffer with the blurred alpha applied.
 */
async function applyFeather(data, width, height, channels, amount) {
  const result = Buffer.from(data);
  const alpha = new Float32Array(width * height);
  const radius = Math.ceil(amount);

  // Extract the alpha channel into a float plane.
  for (let i = 0; i < width * height; i++) {
    alpha[i] = data[i * channels + 3];
  }

  // Three box-blur passes approximate a Gaussian.
  for (let pass = 0; pass < 3; pass++) {
    const newAlpha = new Float32Array(width * height);

    for (let y = 0; y < height; y++) {
      for (let x = 0; x < width; x++) {
        let sum = 0;
        let count = 0;

        // Average over the (2*radius+1)^2 window, clipped at the borders.
        for (let dy = -radius; dy <= radius; dy++) {
          for (let dx = -radius; dx <= radius; dx++) {
            const nx = x + dx;
            const ny = y + dy;
            if (nx >= 0 && nx < width && ny >= 0 && ny < height) {
              sum += alpha[ny * width + nx];
              count++;
            }
          }
        }

        newAlpha[y * width + x] = sum / count;
      }
    }

    alpha.set(newAlpha);
  }

  // Write the blurred alpha back into the result buffer.
  for (let i = 0; i < width * height; i++) {
    result[i * channels + 3] = Math.round(alpha[i]);
  }

  return result;
}
|
|
483
|
+
|
|
484
|
+
/**
 * Smooth jagged edges: every interior pixel sitting on a hard alpha
 * transition (at least one, but not all four, 4-neighbors differing by more
 * than 128 alpha) is replaced by the average of its 3x3 neighborhood across
 * all channels. The one-pixel border is left unchanged.
 *
 * @param {Buffer} data - Raw interleaved pixel data (alpha at offset 3).
 * @param {number} width - Image width in pixels.
 * @param {number} height - Image height in pixels.
 * @param {number} channels - Bytes per pixel.
 * @returns {Promise<Buffer>} A new buffer; the input is not modified.
 */
async function applySmoothing(data, width, height, channels) {
  const output = Buffer.from(data);

  for (let row = 1; row < height - 1; row++) {
    for (let col = 1; col < width - 1; col++) {
      const base = (row * width + col) * channels;
      const alphaHere = data[base + 3];

      // Count 4-neighbors whose alpha differs sharply from this pixel.
      const crossOffsets = [
        ((row - 1) * width + col) * channels,
        ((row + 1) * width + col) * channels,
        (row * width + (col - 1)) * channels,
        (row * width + (col + 1)) * channels,
      ];

      let transitions = 0;
      for (const off of crossOffsets) {
        if (Math.abs(data[off + 3] - alphaHere) > 128) {
          transitions++;
        }
      }

      // Only genuine edge pixels are smoothed: some, but not all, neighbors
      // differ. Fully interior (0) and fully isolated (4) pixels are kept.
      if (transitions === 0 || transitions === 4) {
        continue;
      }

      let sumR = 0;
      let sumG = 0;
      let sumB = 0;
      let sumA = 0;
      let total = 0;

      for (let dr = -1; dr <= 1; dr++) {
        for (let dc = -1; dc <= 1; dc++) {
          const nr = row + dr;
          const nc = col + dc;
          if (nc >= 0 && nc < width && nr >= 0 && nr < height) {
            const off = (nr * width + nc) * channels;
            sumR += data[off];
            sumG += data[off + 1];
            sumB += data[off + 2];
            sumA += data[off + 3];
            total++;
          }
        }
      }

      // Buffer element assignment truncates fractions toward zero, matching
      // the original behavior exactly.
      output[base] = sumR / total;
      output[base + 1] = sumG / total;
      output[base + 2] = sumB / total;
      output[base + 3] = sumA / total;
    }
  }

  return output;
}
|
|
540
|
+
|
|
541
|
+
/**
 * Antialias edges. Currently an alias for the smoothing pass, which averages
 * hard alpha transitions over a 3x3 neighborhood.
 *
 * @param {Buffer} data - Raw interleaved pixel data.
 * @param {number} width - Image width in pixels.
 * @param {number} height - Image height in pixels.
 * @param {number} channels - Bytes per pixel.
 * @returns {Promise<Buffer>} A new buffer with smoothed edges.
 */
async function applyAntialias(data, width, height, channels) {
  // Delegates entirely to the smoothing pass.
  const smoothed = await applySmoothing(data, width, height, channels);
  return smoothed;
}
|
|
547
|
+
|
|
548
|
+
/**
 * Remove the background from an image and write the result to disk.
 *
 * Fixes: dropped an unused `metadata` local (and the extra awaited
 * `image.metadata()` round-trip that produced it) and replaced the
 * copy-pasted method switch with a dispatch table so every method is invoked
 * identically; unknown methods still fall back to `auto`.
 *
 * @param {string} inputPath - Path of the source image.
 * @param {string} outputPath - Path to write the processed image to.
 * @param {Object} options - Method selection plus method-specific settings:
 *   { method, color, tolerance, invert, radius, chromaColor, floodSeed,
 *     distance, feather, smooth, antialias, format, quality }.
 * @returns {Promise<{outputPath: string, width: number, height: number,
 *   method: string, detectedColor: *}>}
 */
async function removeBackground(inputPath, outputPath, options) {
  const method = options.method || 'auto';

  // Decode to raw pixels with a guaranteed alpha channel.
  const image = sharp(inputPath);
  const raw = await image.ensureAlpha().raw().toBuffer({ resolveWithObject: true });
  const { data, info } = raw;
  const { width, height, channels } = info;

  // Dispatch table: one entry per removal strategy.
  const methods = {
    color: explicitColorMethod,
    inferred: inferredColorMethod,
    chroma: chromaKeyMethod,
    flood: floodFillMethod,
    edges: edgesMethod,
    auto: autoMethod,
  };
  const run = methods[method] || autoMethod;

  const result = await run(data, width, height, channels, options);
  const detectedColor = result.detectedColor ?? null;

  // Optional edge post-processing (feather / smooth / antialias).
  const processedData = await postProcess(result.data, width, height, channels, options);

  // Encode in the requested format (PNG by default, which keeps alpha).
  const format = options.format || 'png';
  let outputSharp = sharp(processedData, {
    raw: { width, height, channels },
  });

  switch (format.toLowerCase()) {
    case 'jpeg':
    case 'jpg':
      outputSharp = outputSharp.jpeg({ quality: options.quality || 90 });
      break;
    case 'webp':
      outputSharp = outputSharp.webp({ quality: options.quality || 90 });
      break;
    case 'png':
    default:
      outputSharp = outputSharp.png({ compressionLevel: 9 });
      break;
  }

  await outputSharp.toFile(outputPath);

  return {
    outputPath,
    width,
    height,
    method,
    detectedColor,
  };
}
|
|
628
|
+
|
|
629
|
+
/**
 * Generate a black-and-white mask preview: white where pixels are kept
 * (alpha > 128 after background removal), black where they are removed.
 *
 * Fixes: dropped an unused `metadata` local (and its awaited
 * `image.metadata()` round-trip) and replaced the copy-pasted method switch
 * with the same dispatch table used by removeBackground.
 *
 * @param {string} inputPath - Path of the source image.
 * @param {string} outputPath - Path to write the PNG mask to.
 * @param {Object} options - Same method options as removeBackground.
 * @returns {Promise<{outputPath: string}>}
 */
async function generateMask(inputPath, outputPath, options) {
  const method = options.method || 'auto';

  // Decode to raw pixels with a guaranteed alpha channel.
  const image = sharp(inputPath);
  const raw = await image.ensureAlpha().raw().toBuffer({ resolveWithObject: true });
  const { data, info } = raw;
  const { width, height, channels } = info;

  // Same dispatch as removeBackground; unknown methods fall back to auto.
  const methods = {
    color: explicitColorMethod,
    inferred: inferredColorMethod,
    chroma: chromaKeyMethod,
    flood: floodFillMethod,
    edges: edgesMethod,
    auto: autoMethod,
  };
  const run = methods[method] || autoMethod;
  const processed = await run(data, width, height, channels, options);

  // Binarize the alpha channel into a 3-channel (RGB) mask image.
  const maskData = Buffer.alloc(width * height * 3);

  for (let i = 0; i < width * height; i++) {
    const alpha = processed.data[i * channels + 3];
    const val = alpha > 128 ? 255 : 0;
    maskData[i * 3] = val;
    maskData[i * 3 + 1] = val;
    maskData[i * 3 + 2] = val;
  }

  await sharp(maskData, {
    raw: { width, height, channels: 3 },
  })
    .png()
    .toFile(outputPath);

  return { outputPath };
}
|
|
686
|
+
|
|
687
|
+
/**
 * List all available background-removal methods with AI-friendly
 * descriptions, supported options, recommended use cases, and CLI examples.
 *
 * @returns {Array<{name: string, description: string, options: string[],
 *   bestFor: string[], example: string}>}
 */
function listMethods() {
  // Small factory keeps every entry's shape uniform.
  const entry = (name, description, options, bestFor, example) => ({
    name,
    description,
    options,
    bestFor,
    example,
  });

  return [
    entry(
      'auto',
      'Automatically selects the best method based on image analysis. Uses inferred color detection as the default strategy.',
      ['tolerance', 'feather', 'smooth', 'antialias', 'quality', 'format'],
      [
        'Unknown or untested images',
        'Quick batch processing',
        'Mixed content types',
        'When unsure which method to choose',
      ],
      'background-remove remove photo.jpg --method auto',
    ),
    entry(
      'color',
      'Remove a specific exact color value. Most reliable when you know the precise background color (e.g., pure white #FFFFFF or black #000000).',
      ['color (required)', 'tolerance', 'invert', 'feather', 'smooth'],
      [
        'Solid color backgrounds with known color',
        'White or black backgrounds',
        'Brand-colored backgrounds (exact hex known)',
        'Screenshots with solid fills',
        'Logos with known background colors',
      ],
      'background-remove remove logo.png --method color --color "#FFFFFF" --tolerance 30',
    ),
    entry(
      'inferred',
      'Auto-detects the background color by sampling pixels from corners and edges of the image. Assumes background is uniform and at image boundaries.',
      ['tolerance', 'radius', 'invert', 'feather'],
      [
        'Product photos with white/light backgrounds',
        'Portraits with plain walls',
        'Images where subject is centered',
        'When background color is unknown but solid',
        'Screenshots and digital images',
      ],
      'background-remove remove product.jpg --method inferred --tolerance 40 --radius 15',
    ),
    entry(
      'chroma',
      'Chroma key (green screen / blue screen) removal. Optimized for removing specific chroma colors while preserving foreground subject.',
      ['chroma-color', 'tolerance', 'invert', 'feather', 'smooth'],
      [
        'Green screen photography',
        'Blue screen video frames',
        'Studio chroma key shots',
        'Video conferencing backgrounds',
        'Any color-keyed studio setup',
      ],
      'background-remove remove greenscreen.jpg --method chroma --chroma-color green --tolerance 50',
    ),
    entry(
      'flood',
      'Flood fill algorithm starts from seed point(s) and removes connected regions of similar color. Effective when background touches image edges.',
      ['flood-seed', 'tolerance', 'invert'],
      [
        'Diagrams and charts',
        'Product photos with connected backgrounds',
        'Images with multiple background regions',
        'When you need precise control over start point',
        'Complex layouts with uniform fills',
      ],
      'background-remove remove diagram.png --method flood --flood-seed 0,0 --tolerance 25',
    ),
    entry(
      'edges',
      'Detects edges using gradient analysis and removes areas outside detected subject boundaries. Good for high-contrast subjects.',
      ['tolerance', 'distance', 'invert', 'feather'],
      [
        'High-contrast subjects against background',
        'Objects with clear boundaries',
        'Dark subjects on light backgrounds (or vice versa)',
        'When other methods include too much background',
        'Silhouette-style images',
      ],
      'background-remove remove object.jpg --method edges --distance 15 --tolerance 35',
    ),
  ];
}
|
|
771
|
+
|
|
772
|
+
// Public API of this module: the two high-level operations, the method
// catalog, and the low-level color helpers they are built on.
module.exports = {
  removeBackground, // remove an image's background and write the result file
  generateMask, // write a black/white keep-mask preview PNG
  listMethods, // metadata describing every removal method
  parseColor, // color string -> [r, g, b]
  colorDistance, // Euclidean distance between two RGB colors
};
|