spec-cat 0.1.0 → 0.1.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (110)
  1. package/.output/nitro.json +1 -1
  2. package/.output/public/_nuxt/{Bqlz6CoK.js → BE_75kPa.js} +1 -1
  3. package/.output/public/_nuxt/{B2wdmh_w.js → BJ7m4fRW.js} +53 -53
  4. package/.output/public/_nuxt/{KNuzSjk0.js → CCNYUZ9m.js} +1 -1
  5. package/.output/public/_nuxt/{BvosqTnx.js → DGtcdWVl.js} +1 -1
  6. package/.output/public/_nuxt/DxEx-kFx.js +1 -0
  7. package/.output/public/_nuxt/{BwcbSlWF.js → DyMq_cQC.js} +2 -2
  8. package/.output/public/_nuxt/{COTT6rNZ.js → _cj5lOdZ.js} +1 -1
  9. package/.output/public/_nuxt/builds/latest.json +1 -1
  10. package/.output/public/_nuxt/builds/meta/3a0aacc1-0bd1-4d15-8b8a-3cee48cbfc69.json +1 -0
  11. package/.output/public/_nuxt/{BUOk7wkI.js → gDut6QrP.js} +1 -1
  12. package/.output/public/_nuxt/{C5wk2twv.js → nJpWpjzg.js} +1 -1
  13. package/.output/public/_nuxt/{DBab5Zcv.js → waQ9fPC1.js} +1 -1
  14. package/.output/server/chunks/_/codexProvider.mjs +64 -18
  15. package/.output/server/chunks/_/codexProvider.mjs.map +1 -1
  16. package/.output/server/chunks/build/client.precomputed.mjs +1 -1
  17. package/.output/server/chunks/build/client.precomputed.mjs.map +1 -1
  18. package/.output/server/chunks/nitro/nitro.mjs +702 -703
  19. package/.output/server/chunks/routes/_ws.mjs +37 -7
  20. package/.output/server/chunks/routes/_ws.mjs.map +1 -1
  21. package/.output/server/node_modules/@huggingface/jinja/dist/index.js +1572 -0
  22. package/.output/server/node_modules/@huggingface/jinja/package.json +55 -0
  23. package/.output/server/node_modules/@xenova/transformers/package.json +84 -0
  24. package/.output/server/node_modules/@xenova/transformers/src/backends/onnx.js +50 -0
  25. package/.output/server/node_modules/@xenova/transformers/src/configs.js +107 -0
  26. package/.output/server/node_modules/@xenova/transformers/src/env.js +128 -0
  27. package/.output/server/node_modules/@xenova/transformers/src/models.js +6267 -0
  28. package/.output/server/node_modules/@xenova/transformers/src/pipelines.js +3287 -0
  29. package/.output/server/node_modules/@xenova/transformers/src/processors.js +2248 -0
  30. package/.output/server/node_modules/@xenova/transformers/src/tokenizers.js +4479 -0
  31. package/.output/server/node_modules/@xenova/transformers/src/transformers.js +24 -0
  32. package/.output/server/node_modules/@xenova/transformers/src/utils/audio.js +672 -0
  33. package/.output/server/node_modules/@xenova/transformers/src/utils/core.js +175 -0
  34. package/.output/server/node_modules/@xenova/transformers/src/utils/data-structures.js +415 -0
  35. package/.output/server/node_modules/@xenova/transformers/src/utils/generation.js +873 -0
  36. package/.output/server/node_modules/@xenova/transformers/src/utils/hub.js +658 -0
  37. package/.output/server/node_modules/@xenova/transformers/src/utils/image.js +731 -0
  38. package/.output/server/node_modules/@xenova/transformers/src/utils/maths.js +985 -0
  39. package/.output/server/node_modules/@xenova/transformers/src/utils/tensor.js +1239 -0
  40. package/.output/server/node_modules/color/index.js +496 -0
  41. package/.output/server/node_modules/color/package.json +47 -0
  42. package/.output/server/node_modules/color-convert/conversions.js +839 -0
  43. package/.output/server/node_modules/color-convert/index.js +81 -0
  44. package/.output/server/node_modules/color-convert/package.json +48 -0
  45. package/.output/server/node_modules/color-convert/route.js +97 -0
  46. package/.output/server/node_modules/color-name/index.js +152 -0
  47. package/.output/server/node_modules/color-name/package.json +28 -0
  48. package/.output/server/node_modules/color-string/index.js +242 -0
  49. package/.output/server/node_modules/color-string/package.json +39 -0
  50. package/.output/server/node_modules/detect-libc/lib/detect-libc.js +313 -0
  51. package/.output/server/node_modules/detect-libc/lib/elf.js +39 -0
  52. package/.output/server/node_modules/detect-libc/lib/filesystem.js +51 -0
  53. package/.output/server/node_modules/detect-libc/lib/process.js +24 -0
  54. package/.output/server/node_modules/detect-libc/package.json +44 -0
  55. package/.output/server/node_modules/is-arrayish/index.js +9 -0
  56. package/.output/server/node_modules/is-arrayish/package.json +45 -0
  57. package/.output/server/node_modules/onnxruntime-common/dist/ort-common.node.js +7 -0
  58. package/.output/server/node_modules/onnxruntime-common/package.json +31 -0
  59. package/.output/server/node_modules/onnxruntime-node/bin/napi-v3/darwin/arm64/onnxruntime_binding.node +0 -0
  60. package/.output/server/node_modules/onnxruntime-node/bin/napi-v3/darwin/x64/onnxruntime_binding.node +0 -0
  61. package/.output/server/node_modules/onnxruntime-node/bin/napi-v3/linux/arm64/libonnxruntime.so.1.14.0 +0 -0
  62. package/.output/server/node_modules/onnxruntime-node/bin/napi-v3/linux/arm64/onnxruntime_binding.node +0 -0
  63. package/.output/server/node_modules/onnxruntime-node/bin/napi-v3/linux/x64/libonnxruntime.so.1.14.0 +0 -0
  64. package/.output/server/node_modules/onnxruntime-node/bin/napi-v3/linux/x64/onnxruntime_binding.node +0 -0
  65. package/.output/server/node_modules/onnxruntime-node/bin/napi-v3/win32/arm64/onnxruntime_binding.node +0 -0
  66. package/.output/server/node_modules/onnxruntime-node/bin/napi-v3/win32/x64/onnxruntime_binding.node +0 -0
  67. package/.output/server/node_modules/onnxruntime-node/dist/backend.js +75 -0
  68. package/.output/server/node_modules/onnxruntime-node/dist/binding.js +10 -0
  69. package/.output/server/node_modules/onnxruntime-node/dist/index.js +23 -0
  70. package/.output/server/node_modules/onnxruntime-node/package.json +58 -0
  71. package/.output/server/node_modules/onnxruntime-web/dist/ort-web.node.js +7 -0
  72. package/.output/server/node_modules/onnxruntime-web/package.json +84 -0
  73. package/.output/server/node_modules/semver/classes/semver.js +333 -0
  74. package/.output/server/node_modules/semver/functions/coerce.js +62 -0
  75. package/.output/server/node_modules/semver/functions/compare.js +7 -0
  76. package/.output/server/node_modules/semver/functions/gte.js +5 -0
  77. package/.output/server/node_modules/semver/functions/parse.js +18 -0
  78. package/.output/server/node_modules/semver/internal/constants.js +37 -0
  79. package/.output/server/node_modules/semver/internal/debug.js +11 -0
  80. package/.output/server/node_modules/semver/internal/identifiers.js +29 -0
  81. package/.output/server/node_modules/semver/internal/parse-options.js +17 -0
  82. package/.output/server/node_modules/semver/internal/re.js +223 -0
  83. package/.output/server/node_modules/semver/package.json +78 -0
  84. package/.output/server/node_modules/sharp/build/Release/sharp-linux-x64.node +0 -0
  85. package/.output/server/node_modules/sharp/lib/channel.js +174 -0
  86. package/.output/server/node_modules/sharp/lib/colour.js +184 -0
  87. package/.output/server/node_modules/sharp/lib/composite.js +210 -0
  88. package/.output/server/node_modules/sharp/lib/constructor.js +439 -0
  89. package/.output/server/node_modules/sharp/lib/index.js +16 -0
  90. package/.output/server/node_modules/sharp/lib/input.js +631 -0
  91. package/.output/server/node_modules/sharp/lib/is.js +155 -0
  92. package/.output/server/node_modules/sharp/lib/libvips.js +140 -0
  93. package/.output/server/node_modules/sharp/lib/operation.js +919 -0
  94. package/.output/server/node_modules/sharp/lib/output.js +1413 -0
  95. package/.output/server/node_modules/sharp/lib/platform.js +30 -0
  96. package/.output/server/node_modules/sharp/lib/resize.js +582 -0
  97. package/.output/server/node_modules/sharp/lib/sharp.js +38 -0
  98. package/.output/server/node_modules/sharp/lib/utility.js +287 -0
  99. package/.output/server/node_modules/sharp/package.json +204 -0
  100. package/.output/server/node_modules/sharp/vendor/8.14.5/linux-x64/THIRD-PARTY-NOTICES.md +43 -0
  101. package/.output/server/node_modules/sharp/vendor/8.14.5/linux-x64/lib/libvips-cpp.so.42 +0 -0
  102. package/.output/server/node_modules/sharp/vendor/8.14.5/linux-x64/platform.json +1 -0
  103. package/.output/server/node_modules/sharp/vendor/8.14.5/linux-x64/versions.json +31 -0
  104. package/.output/server/node_modules/simple-swizzle/index.js +29 -0
  105. package/.output/server/node_modules/simple-swizzle/package.json +36 -0
  106. package/.output/server/package.json +15 -1
  107. package/README.md +2 -0
  108. package/package.json +12 -19
  109. package/.output/public/_nuxt/5FxpIoe_.js +0 -1
  110. package/.output/public/_nuxt/builds/meta/21578a05-1b7e-4847-a8ff-7480800ea4a6.json +0 -1
@@ -0,0 +1,731 @@
1
+
2
+ /**
3
+ * @file Helper module for image processing.
4
+ *
5
+ * These functions and classes are only used internally,
6
+ * meaning an end-user shouldn't need to access anything here.
7
+ *
8
+ * @module utils/image
9
+ */
10
+
11
+ import { getFile } from './hub.js';
12
+ import { env } from '../env.js';
13
+ import { Tensor } from './tensor.js';
14
+
15
+ // Will be empty (or not used) if running in browser or web-worker
16
+ import sharp from 'sharp';
17
+
18
+ const BROWSER_ENV = typeof self !== 'undefined';
19
+ const WEBWORKER_ENV = BROWSER_ENV && self.constructor.name === 'DedicatedWorkerGlobalScope';
20
+
21
+ let createCanvasFunction;
22
+ let ImageDataClass;
23
+ let loadImageFunction;
24
+ if (BROWSER_ENV) {
25
+ // Running in browser or web-worker
26
+ createCanvasFunction = (/** @type {number} */ width, /** @type {number} */ height) => {
27
+ if (!self.OffscreenCanvas) {
28
+ throw new Error('OffscreenCanvas not supported by this browser.');
29
+ }
30
+ return new self.OffscreenCanvas(width, height)
31
+ };
32
+ loadImageFunction = self.createImageBitmap;
33
+ ImageDataClass = self.ImageData;
34
+
35
+ } else if (sharp) {
36
+ // Running in Node.js, electron, or other non-browser environment
37
+
38
+ loadImageFunction = async (/**@type {sharp.Sharp}*/img) => {
39
+ const metadata = await img.metadata();
40
+ const rawChannels = metadata.channels;
41
+
42
+ let { data, info } = await img.rotate().raw().toBuffer({ resolveWithObject: true });
43
+
44
+ const newImage = new RawImage(new Uint8ClampedArray(data), info.width, info.height, info.channels);
45
+ if (rawChannels !== undefined && rawChannels !== info.channels) {
46
+ // Make sure the new image has the same number of channels as the input image.
47
+ // This is necessary for grayscale images.
48
+ newImage.convert(rawChannels);
49
+ }
50
+ return newImage;
51
+ }
52
+
53
+ } else {
54
+ throw new Error('Unable to load image processing library.');
55
+ }
56
+
57
+
58
+ // Defined here: https://github.com/python-pillow/Pillow/blob/a405e8406b83f8bfb8916e93971edc7407b8b1ff/src/libImaging/Imaging.h#L262-L268
59
+ const RESAMPLING_MAPPING = {
60
+ 0: 'nearest',
61
+ 1: 'lanczos',
62
+ 2: 'bilinear',
63
+ 3: 'bicubic',
64
+ 4: 'box',
65
+ 5: 'hamming',
66
+ }
67
+
68
+ /**
69
+ * Mapping from file extensions to MIME types.
70
+ */
71
+ const CONTENT_TYPE_MAP = new Map([
72
+ ['png', 'image/png'],
73
+ ['jpg', 'image/jpeg'],
74
+ ['jpeg', 'image/jpeg'],
75
+ ['gif', 'image/gif'],
76
+ ]);
77
+
78
export class RawImage {

    /**
     * Create a new `RawImage` object.
     * @param {Uint8ClampedArray|Uint8Array} data The pixel data (interleaved, row-major).
     * @param {number} width The width of the image.
     * @param {number} height The height of the image.
     * @param {1|2|3|4} channels The number of channels.
     */
    constructor(data, width, height, channels) {
        this.data = data;
        this.width = width;
        this.height = height;
        this.channels = channels;
    }

    /**
     * Returns the size of the image (width, height).
     * @returns {[number, number]} The size of the image (width, height).
     */
    get size() {
        return [this.width, this.height];
    }

    /**
     * Helper method for reading an image from a variety of input types.
     * @param {RawImage|string|URL} input The image, URL, or file path to read.
     * @returns {Promise<RawImage>} The image object.
     * @throws {Error} If the input type is not supported.
     *
     * **Example:** Read image from a URL.
     * ```javascript
     * let image = await RawImage.read('https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/football-match.jpg');
     * // RawImage {
     * //   "data": Uint8ClampedArray [ 25, 25, 25, 19, 19, 19, ... ],
     * //   "width": 800,
     * //   "height": 533,
     * //   "channels": 3
     * // }
     * ```
     */
    static async read(input) {
        if (input instanceof RawImage) {
            return input;
        } else if (typeof input === 'string' || input instanceof URL) {
            return await this.fromURL(input);
        } else {
            throw new Error(`Unsupported input type: ${typeof input}`);
        }
    }


    /**
     * Read an image from a URL or file path.
     * @param {string|URL} url The URL or file path to read the image from.
     * @returns {Promise<RawImage>} The image object.
     * @throws {Error} If the resource cannot be fetched (non-200 status).
     */
    static async fromURL(url) {
        let response = await getFile(url);
        if (response.status !== 200) {
            throw new Error(`Unable to read image from "${url}" (${response.status} ${response.statusText})`);
        }
        let blob = await response.blob();
        return this.fromBlob(blob);
    }

    /**
     * Helper method to create a new Image from a blob.
     * @param {Blob} blob The blob to read the image from.
     * @returns {Promise<RawImage>} The image object (RGBA in the browser; decoded
     * channel count when using sharp).
     */
    static async fromBlob(blob) {
        if (BROWSER_ENV) {
            // Running in environment with canvas
            let img = await loadImageFunction(blob);

            const ctx = createCanvasFunction(img.width, img.height).getContext('2d');

            // Draw image to context
            ctx.drawImage(img, 0, 0);

            // Canvas data is always RGBA (4 channels).
            return new this(ctx.getImageData(0, 0, img.width, img.height).data, img.width, img.height, 4);

        } else {
            // Use sharp.js to read (and possibly resize) the image.
            let img = sharp(await blob.arrayBuffer());

            return await loadImageFunction(img);
        }
    }

    /**
     * Helper method to create a new Image from a tensor.
     * @param {Tensor} tensor A rank-3 tensor of uint8 pixel data.
     * @param {'CHW'|'HWC'} [channel_format='CHW'] The memory layout of `tensor`.
     * @returns {RawImage} The image object.
     * @throws {Error} If the tensor is not rank 3, its data is not a uint8 array,
     * the channel format is unknown, or the channel count is unsupported.
     */
    static fromTensor(tensor, channel_format = 'CHW') {
        if (tensor.dims.length !== 3) {
            throw new Error(`Tensor should have 3 dimensions, but has ${tensor.dims.length} dimensions.`);
        }

        if (channel_format === 'CHW') {
            // Rearrange to HWC so the data matches the interleaved pixel layout below.
            tensor = tensor.transpose(1, 2, 0);
        } else if (channel_format === 'HWC') {
            // Do nothing
        } else {
            throw new Error(`Unsupported channel format: ${channel_format}`);
        }
        if (!(tensor.data instanceof Uint8ClampedArray || tensor.data instanceof Uint8Array)) {
            throw new Error(`Unsupported tensor type: ${tensor.type}`);
        }
        switch (tensor.dims[2]) {
            case 1:
            case 2:
            case 3:
            case 4:
                return new RawImage(tensor.data, tensor.dims[1], tensor.dims[0], tensor.dims[2]);
            default:
                throw new Error(`Unsupported number of channels: ${tensor.dims[2]}`);
        }
    }

    /**
     * Convert the image to grayscale format.
     * @returns {RawImage} `this` to support chaining.
     * @throws {Error} If the current channel count is not 1, 3, or 4.
     */
    grayscale() {
        if (this.channels === 1) {
            return this;
        }

        let newData = new Uint8ClampedArray(this.width * this.height * 1);
        switch (this.channels) {
            case 3: // rgb to grayscale
            case 4: // rgba to grayscale (alpha is discarded)
                for (let i = 0, offset = 0; i < this.data.length; i += this.channels) {
                    const red = this.data[i];
                    const green = this.data[i + 1];
                    const blue = this.data[i + 2];

                    // ITU-R BT.601 luma coefficients.
                    newData[offset++] = Math.round(0.2989 * red + 0.5870 * green + 0.1140 * blue);
                }
                break;
            default:
                throw new Error(`Conversion failed due to unsupported number of channels: ${this.channels}`);
        }
        return this._update(newData, this.width, this.height, 1);
    }

    /**
     * Convert the image to RGB format.
     * @returns {RawImage} `this` to support chaining.
     * @throws {Error} If the current channel count is not 1, 3, or 4.
     */
    rgb() {
        if (this.channels === 3) {
            return this;
        }

        let newData = new Uint8ClampedArray(this.width * this.height * 3);

        switch (this.channels) {
            case 1: // grayscale to rgb
                for (let i = 0, offset = 0; i < this.data.length; ++i) {
                    newData[offset++] = this.data[i];
                    newData[offset++] = this.data[i];
                    newData[offset++] = this.data[i];
                }
                break;
            case 4: // rgba to rgb (alpha is discarded)
                for (let i = 0, offset = 0; i < this.data.length; i += 4) {
                    newData[offset++] = this.data[i];
                    newData[offset++] = this.data[i + 1];
                    newData[offset++] = this.data[i + 2];
                }
                break;
            default:
                throw new Error(`Conversion failed due to unsupported number of channels: ${this.channels}`);
        }
        return this._update(newData, this.width, this.height, 3);

    }

    /**
     * Convert the image to RGBA format.
     * @returns {RawImage} `this` to support chaining.
     * @throws {Error} If the current channel count is not 1, 3, or 4.
     */
    rgba() {
        if (this.channels === 4) {
            return this;
        }

        let newData = new Uint8ClampedArray(this.width * this.height * 4);

        switch (this.channels) {
            case 1: // grayscale to rgba (fully opaque)
                for (let i = 0, offset = 0; i < this.data.length; ++i) {
                    newData[offset++] = this.data[i];
                    newData[offset++] = this.data[i];
                    newData[offset++] = this.data[i];
                    newData[offset++] = 255;
                }
                break;
            case 3: // rgb to rgba (fully opaque)
                for (let i = 0, offset = 0; i < this.data.length; i += 3) {
                    newData[offset++] = this.data[i];
                    newData[offset++] = this.data[i + 1];
                    newData[offset++] = this.data[i + 2];
                    newData[offset++] = 255;
                }
                break;
            default:
                throw new Error(`Conversion failed due to unsupported number of channels: ${this.channels}`);
        }

        return this._update(newData, this.width, this.height, 4);
    }

    /**
     * Resize the image to the given dimensions. This method uses the canvas API
     * (browser) or sharp (Node.js) to perform the resizing.
     * @param {number} width The width of the new image.
     * @param {number} height The height of the new image.
     * @param {Object} options Additional options for resizing.
     * @param {0|1|2|3|4|5|string} [options.resample] The resampling method to use
     * (PIL enum value or sharp interpolator name). Defaults to 2 (bilinear).
     * @returns {Promise<RawImage>} The resized image.
     * @throws {Error} If the resampling method is not supported.
     */
    async resize(width, height, {
        resample = 2,
    } = {}) {

        // Ensure resample method is a string (numeric PIL enum -> name).
        let resampleMethod = RESAMPLING_MAPPING[resample] ?? resample;

        if (BROWSER_ENV) {
            // TODO use `resample` in browser environment

            // Store number of channels before resizing
            let numChannels = this.channels;

            // Create canvas object for this image
            let canvas = this.toCanvas();

            // Actually perform resizing using the canvas API
            const ctx = createCanvasFunction(width, height).getContext('2d');

            // Draw image to context, resizing in the process
            ctx.drawImage(canvas, 0, 0, width, height);

            // Create image from the resized data
            let resizedImage = new RawImage(ctx.getImageData(0, 0, width, height).data, width, height, 4);

            // Convert back so that image has the same number of channels as before
            return resizedImage.convert(numChannels);

        } else {
            // Create sharp image from raw data, and resize
            let img = this.toSharp();

            switch (resampleMethod) {
                case 'box':
                case 'hamming':
                    if (resampleMethod === 'box' || resampleMethod === 'hamming') {
                        console.warn(`Resampling method ${resampleMethod} is not yet supported. Using bilinear instead.`);
                        resampleMethod = 'bilinear';
                    }
                    // Deliberate fall-through: 'box'/'hamming' are downgraded to
                    // 'bilinear' above and handled by the affine branch below.

                case 'nearest':
                case 'bilinear':
                case 'bicubic':
                    // Perform resizing using affine transform.
                    // This matches how the python Pillow library does it.
                    img = img.affine([width / this.width, 0, 0, height / this.height], {
                        interpolator: resampleMethod
                    });
                    break;

                case 'lanczos':
                    // https://github.com/python-pillow/Pillow/discussions/5519
                    // https://github.com/lovell/sharp/blob/main/docs/api-resize.md
                    img = img.resize({
                        width, height,
                        fit: 'fill',
                        kernel: 'lanczos3', // PIL Lanczos uses a kernel size of 3
                    });
                    break;

                default:
                    throw new Error(`Resampling method ${resampleMethod} is not supported.`);
            }

            return await loadImageFunction(img);
        }

    }

    /**
     * Pad the image on each side with transparent/black pixels.
     * Negative values are clamped to 0.
     * @param {[number, number, number, number]} padding Amounts as [left, right, top, bottom].
     * @returns {Promise<RawImage>} The padded image (`this` if no padding is needed).
     */
    async pad([left, right, top, bottom]) {
        left = Math.max(left, 0);
        right = Math.max(right, 0);
        top = Math.max(top, 0);
        bottom = Math.max(bottom, 0);

        if (left === 0 && right === 0 && top === 0 && bottom === 0) {
            // No padding needed
            return this;
        }

        if (BROWSER_ENV) {
            // Store number of channels before padding
            let numChannels = this.channels;

            // Create canvas object for this image
            let canvas = this.toCanvas();

            let newWidth = this.width + left + right;
            let newHeight = this.height + top + bottom;

            // Create a new canvas of the desired size.
            const ctx = createCanvasFunction(newWidth, newHeight).getContext('2d');

            // Draw the original image at (left, top) at its ORIGINAL size.
            // BUG FIX: the destination rectangle previously used `newWidth`/`newHeight`,
            // which stretched the image to fill the padded canvas instead of padding it.
            ctx.drawImage(canvas,
                0, 0, this.width, this.height,
                left, top, this.width, this.height
            );

            // Create image from the padded data
            let paddedImage = new RawImage(
                ctx.getImageData(0, 0, newWidth, newHeight).data,
                newWidth, newHeight, 4);

            // Convert back so that image has the same number of channels as before
            return paddedImage.convert(numChannels);

        } else {
            let img = this.toSharp().extend({ left, right, top, bottom });
            return await loadImageFunction(img);
        }
    }

    /**
     * Crop the image to the given bounding box (inclusive pixel coordinates).
     * Bounds are clamped to the image; returns `this` if the crop covers the whole image.
     * @param {[number, number, number, number]} box The box as [x_min, y_min, x_max, y_max].
     * @returns {Promise<RawImage>} The cropped image.
     */
    async crop([x_min, y_min, x_max, y_max]) {
        // Ensure crop bounds are within the image
        x_min = Math.max(x_min, 0);
        y_min = Math.max(y_min, 0);
        x_max = Math.min(x_max, this.width - 1);
        y_max = Math.min(y_max, this.height - 1);

        // Do nothing if the crop is the entire image
        if (x_min === 0 && y_min === 0 && x_max === this.width - 1 && y_max === this.height - 1) {
            return this;
        }

        const crop_width = x_max - x_min + 1;
        const crop_height = y_max - y_min + 1;

        if (BROWSER_ENV) {
            // Store number of channels before resizing
            const numChannels = this.channels;

            // Create canvas object for this image
            const canvas = this.toCanvas();

            // Create a new canvas of the desired size. This is needed since if the
            // image is too small, we need to pad it with black pixels.
            const ctx = createCanvasFunction(crop_width, crop_height).getContext('2d');

            // Draw image to context, cropping in the process
            ctx.drawImage(canvas,
                x_min, y_min, crop_width, crop_height,
                0, 0, crop_width, crop_height
            );

            // Create image from the resized data
            const resizedImage = new RawImage(ctx.getImageData(0, 0, crop_width, crop_height).data, crop_width, crop_height, 4);

            // Convert back so that image has the same number of channels as before
            return resizedImage.convert(numChannels);

        } else {
            // Create sharp image from raw data
            const img = this.toSharp().extract({
                left: x_min,
                top: y_min,
                width: crop_width,
                height: crop_height,
            });

            return await loadImageFunction(img);
        }

    }

    /**
     * Crop the image around its center to the given dimensions. If the target is
     * larger than the image along an axis, the result is padded along that axis.
     * @param {number} crop_width The width of the cropped image.
     * @param {number} crop_height The height of the cropped image.
     * @returns {Promise<RawImage>} The center-cropped image.
     */
    async center_crop(crop_width, crop_height) {
        // If the image is already the desired size, return it
        if (this.width === crop_width && this.height === crop_height) {
            return this;
        }

        // Determine bounds of the image in the new canvas.
        // Positive offset => crop; negative offset => pad.
        let width_offset = (this.width - crop_width) / 2;
        let height_offset = (this.height - crop_height) / 2;


        if (BROWSER_ENV) {
            // Store number of channels before resizing
            let numChannels = this.channels;

            // Create canvas object for this image
            let canvas = this.toCanvas();

            // Create a new canvas of the desired size. This is needed since if the
            // image is too small, we need to pad it with black pixels.
            const ctx = createCanvasFunction(crop_width, crop_height).getContext('2d');

            let sourceX = 0;
            let sourceY = 0;
            let destX = 0;
            let destY = 0;

            if (width_offset >= 0) {
                sourceX = width_offset;
            } else {
                destX = -width_offset;
            }

            if (height_offset >= 0) {
                sourceY = height_offset;
            } else {
                destY = -height_offset;
            }

            // Draw image to context, cropping in the process
            ctx.drawImage(canvas,
                sourceX, sourceY, crop_width, crop_height,
                destX, destY, crop_width, crop_height
            );

            // Create image from the resized data
            let resizedImage = new RawImage(ctx.getImageData(0, 0, crop_width, crop_height).data, crop_width, crop_height, 4);

            // Convert back so that image has the same number of channels as before
            return resizedImage.convert(numChannels);

        } else {
            // Create sharp image from raw data
            let img = this.toSharp();

            if (width_offset >= 0 && height_offset >= 0) {
                // Cropped image lies entirely within the original image
                img = img.extract({
                    left: Math.floor(width_offset),
                    top: Math.floor(height_offset),
                    width: crop_width,
                    height: crop_height,
                })
            } else if (width_offset <= 0 && height_offset <= 0) {
                // Cropped image lies entirely outside the original image,
                // so we add padding
                let top = Math.floor(-height_offset);
                let left = Math.floor(-width_offset);
                img = img.extend({
                    top: top,
                    left: left,

                    // Ensures the resulting image has the desired dimensions
                    right: crop_width - this.width - left,
                    bottom: crop_height - this.height - top,
                });
            } else {
                // Cropped image lies partially outside the original image.
                // We first pad, then crop.

                let y_padding = [0, 0];
                let y_extract = 0;
                if (height_offset < 0) {
                    y_padding[0] = Math.floor(-height_offset);
                    y_padding[1] = crop_height - this.height - y_padding[0];
                } else {
                    y_extract = Math.floor(height_offset);
                }

                let x_padding = [0, 0];
                let x_extract = 0;
                if (width_offset < 0) {
                    x_padding[0] = Math.floor(-width_offset);
                    x_padding[1] = crop_width - this.width - x_padding[0];
                } else {
                    x_extract = Math.floor(width_offset);
                }

                img = img.extend({
                    top: y_padding[0],
                    bottom: y_padding[1],
                    left: x_padding[0],
                    right: x_padding[1],
                }).extract({
                    left: x_extract,
                    top: y_extract,
                    width: crop_width,
                    height: crop_height,
                })
            }

            return await loadImageFunction(img);
        }
    }

    /**
     * Convert the image to a Blob (browser only).
     * @param {string} [type='image/png'] The MIME type of the output.
     * @param {number} [quality=1] Encoding quality in [0, 1] for lossy formats.
     * @returns {Promise<Blob>} The encoded image.
     * @throws {Error} If called outside a browser environment.
     */
    async toBlob(type = 'image/png', quality = 1) {
        if (!BROWSER_ENV) {
            throw new Error('toBlob() is only supported in browser environments.')
        }

        const canvas = this.toCanvas();
        return await canvas.convertToBlob({ type, quality });
    }

    /**
     * Convert the image to a uint8 Tensor.
     * @param {'CHW'|'HWC'} [channel_format='CHW'] Desired tensor layout.
     * @returns {Tensor} The pixel data as a tensor.
     * @throws {Error} If the channel format is unknown.
     */
    toTensor(channel_format = 'CHW') {
        let tensor = new Tensor(
            'uint8',
            new Uint8Array(this.data),
            [this.height, this.width, this.channels]
        );

        if (channel_format === 'HWC') {
            // Do nothing
        } else if (channel_format === 'CHW') { // hwc -> chw
            tensor = tensor.permute(2, 0, 1);
        } else {
            throw new Error(`Unsupported channel format: ${channel_format}`);
        }
        return tensor;
    }

    /**
     * Render the image onto a new canvas (browser only).
     * @returns {OffscreenCanvas} A canvas containing this image.
     * @throws {Error} If called outside a browser environment.
     */
    toCanvas() {
        if (!BROWSER_ENV) {
            throw new Error('toCanvas() is only supported in browser environments.')
        }

        // Clone, and convert data to RGBA before drawing to canvas.
        // This is because the canvas API only supports RGBA
        let cloned = this.clone().rgba();

        // Create canvas object for the cloned image
        let clonedCanvas = createCanvasFunction(cloned.width, cloned.height);

        // Draw image to context
        let data = new ImageDataClass(cloned.data, cloned.width, cloned.height);
        clonedCanvas.getContext('2d').putImageData(data, 0, 0);

        return clonedCanvas;
    }

    /**
     * Helper method to update the image data in place.
     * @param {Uint8ClampedArray} data The new image data.
     * @param {number} width The new width of the image.
     * @param {number} height The new height of the image.
     * @param {1|2|3|4|null} [channels] The new number of channels of the image
     * (`null` keeps the current value).
     * @returns {RawImage} `this` to support chaining.
     * @private
     */
    _update(data, width, height, channels = null) {
        this.data = data;
        this.width = width;
        this.height = height;
        if (channels !== null) {
            this.channels = channels;
        }
        return this;
    }

    /**
     * Clone the image (pixel data is copied, not shared).
     * @returns {RawImage} The cloned image
     */
    clone() {
        return new RawImage(this.data.slice(), this.width, this.height, this.channels);
    }

    /**
     * Helper method for converting image to have a certain number of channels
     * @param {number} numChannels The number of channels. Must be 1, 3, or 4.
     * @returns {RawImage} `this` to support chaining.
     * @throws {Error} If the requested channel count is not 1, 3, or 4.
     */
    convert(numChannels) {
        if (this.channels === numChannels) return this; // Already correct number of channels

        switch (numChannels) {
            case 1:
                this.grayscale();
                break;
            case 3:
                this.rgb();
                break;
            case 4:
                this.rgba();
                break;
            default:
                throw new Error(`Conversion failed due to unsupported number of channels: ${this.channels}`);
        }
        return this;
    }

    /**
     * Save the image to the given path. In the browser this triggers a download;
     * in Node.js it writes to the filesystem via sharp.
     * @param {string} path The path to save the image to.
     * @throws {Error} If called from a Web Worker, or if the filesystem is disabled.
     */
    async save(path) {

        if (BROWSER_ENV) {
            if (WEBWORKER_ENV) {
                throw new Error('Unable to save an image from a Web Worker.')
            }

            // Infer the MIME type from the file extension; default to PNG.
            const extension = path.split('.').pop().toLowerCase();
            const mime = CONTENT_TYPE_MAP.get(extension) ?? 'image/png';

            // Convert image to Blob
            const blob = await this.toBlob(mime);

            // Convert the canvas content to a data URL
            const dataURL = URL.createObjectURL(blob);

            // Create an anchor element with the data URL as the href attribute
            const downloadLink = document.createElement('a');
            downloadLink.href = dataURL;

            // Set the download attribute to specify the desired filename for the downloaded image
            downloadLink.download = path;

            // Trigger the download
            downloadLink.click();

            // Clean up: remove the anchor element from the DOM
            downloadLink.remove();

        } else if (!env.useFS) {
            throw new Error('Unable to save the image because filesystem is disabled in this environment.')

        } else {
            const img = this.toSharp();
            return await img.toFile(path);
        }
    }

    /**
     * Wrap this image's raw pixel data in a sharp instance (server-side only).
     * @returns {sharp.Sharp} A sharp instance backed by this image's data.
     * @throws {Error} If called in a browser environment.
     */
    toSharp() {
        if (BROWSER_ENV) {
            throw new Error('toSharp() is only supported in server-side environments.')
        }

        return sharp(this.data, {
            raw: {
                width: this.width,
                height: this.height,
                channels: this.channels
            }
        });
    }
}