@js-ak/excel-toolbox 1.0.0 → 1.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/build/index.d.ts +1 -1
- package/build/index.js +1 -1
- package/build/lib/index.d.ts +1 -1
- package/build/lib/index.js +1 -1
- package/build/lib/merge-sheets-to-base-file.d.ts +20 -3
- package/build/lib/merge-sheets-to-base-file.d.ts.map +1 -1
- package/build/lib/merge-sheets-to-base-file.js +106 -7
- package/build/lib/xml/build-merged-sheet.js +3 -3
- package/build/lib/xml/extract-rows-from-sheet.js +5 -5
- package/build/lib/xml/extract-xml-from-sheet.d.ts.map +1 -1
- package/build/lib/xml/extract-xml-from-sheet.js +12 -18
- package/build/lib/xml/extract-xml-from-system-content.js +7 -7
- package/build/lib/zip/constants.d.ts +1 -1
- package/build/lib/zip/constants.js +4 -4
- package/build/lib/zip/create.d.ts +1 -1
- package/build/lib/zip/create.js +6 -6
- package/build/lib/zip/index.d.ts +2 -2
- package/build/lib/zip/index.js +2 -2
- package/build/lib/zip/read.d.ts.map +1 -1
- package/build/lib/zip/read.js +3 -3
- package/build/lib/zip/utils.d.ts +1 -1
- package/build/lib/zip/utils.js +1 -1
- package/package.json +2 -1
package/build/index.d.ts
CHANGED
@@ -1,2 +1,2 @@
-export * from
+export * from "./lib/index.js";
 //# sourceMappingURL=index.d.ts.map
package/build/index.js
CHANGED
@@ -1 +1 @@
-export * from
+export * from "./lib/index.js";
package/build/lib/index.d.ts
CHANGED
@@ -1,2 +1,2 @@
-export * from
+export * from "./merge-sheets-to-base-file.js";
 //# sourceMappingURL=index.d.ts.map
package/build/lib/index.js
CHANGED
@@ -1 +1 @@
-export * from
+export * from "./merge-sheets-to-base-file.js";
package/build/lib/merge-sheets-to-base-file.d.ts
CHANGED
@@ -1,12 +1,29 @@
 /// <reference types="node" />
 /// <reference types="node" />
+/**
+ * Merge rows from other Excel files into a base Excel file.
+ * The output is a new Excel file with the merged content.
+ *
+ * @param {Object} data
+ * @param {Object[]} data.additions
+ * @param {Buffer} data.additions.file - The file to extract rows from
+ * @param {number} data.additions.sheetIndex - The 1-based index of the sheet to extract rows from
+ * @param {Buffer} data.baseFile - The base file to add rows to
+ * @param {number} [data.baseSheetIndex=1] - The 1-based index of the sheet in the base file to add rows to
+ * @param {number} [data.gap=1] - The number of empty rows to insert between each added section
+ * @param {string[]} [data.sheetNamesToRemove=[]] - The names of sheets to remove from the output file
+ * @param {number[]} [data.sheetsToRemove=[]] - The 1-based indices of sheets to remove from the output file
+ * @returns {Buffer} - The merged Excel file
+ */
 export declare function mergeSheetsToBaseFile(data: {
-baseFile: Buffer;
-baseSheetIndex?: number;
 additions: {
 file: Buffer;
 sheetIndex: number;
 }[];
+baseFile: Buffer;
+baseSheetIndex?: number;
 gap?: number;
-
+sheetNamesToRemove?: string[];
+sheetsToRemove?: number[];
+}): Buffer;
 //# sourceMappingURL=merge-sheets-to-base-file.d.ts.map
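Note: based on the declaration above, a call against the new signature might look like the sketch below. The file names, the node:fs calls, and the option values are illustrative assumptions, not part of the package.

  // Illustrative usage sketch: paths and option values are assumed, not from the package docs.
  import { readFileSync, writeFileSync } from "node:fs";
  import { mergeSheetsToBaseFile } from "@js-ak/excel-toolbox";

  const merged = mergeSheetsToBaseFile({
      additions: [{ file: readFileSync("report.xlsx"), sheetIndex: 1 }],
      baseFile: readFileSync("template.xlsx"),
      baseSheetIndex: 1,
      gap: 2,
      sheetNamesToRemove: ["Draft"],
      sheetsToRemove: [3],
  });
  writeFileSync("merged.xlsx", merged);

The return value is the merged workbook as a Buffer, so it can be written straight to disk.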
package/build/lib/merge-sheets-to-base-file.d.ts.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"merge-sheets-to-base-file.d.ts","sourceRoot":"","sources":["../../src/lib/merge-sheets-to-base-file.ts"],"names":[],"mappings":";;AAMA,
+{"version":3,"file":"merge-sheets-to-base-file.d.ts","sourceRoot":"","sources":["../../src/lib/merge-sheets-to-base-file.ts"],"names":[],"mappings":";;AAMA;;;;;;;;;;;;;;GAcG;AACH,wBAAgB,qBAAqB,CAAC,IAAI,EAAE;IAC3C,SAAS,EAAE;QAAE,IAAI,EAAE,MAAM,CAAC;QAAC,UAAU,EAAE,MAAM,CAAA;KAAE,EAAE,CAAC;IAClD,QAAQ,EAAE,MAAM,CAAC;IACjB,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB,GAAG,CAAC,EAAE,MAAM,CAAC;IACb,kBAAkB,CAAC,EAAE,MAAM,EAAE,CAAC;IAC9B,cAAc,CAAC,EAAE,MAAM,EAAE,CAAC;CAC1B,GAAG,MAAM,CAgGT"}
package/build/lib/merge-sheets-to-base-file.js
CHANGED
@@ -1,9 +1,24 @@
-import {
-import {
-import {
-import * as Zip from
-
-
+import { buildMergedSheet } from "./xml/build-merged-sheet.js";
+import { extractRowsFromSheet } from "./xml/extract-rows-from-sheet.js";
+import { shiftRowIndices } from "./xml/shift-row-indices.js";
+import * as Zip from "./zip/index.js";
+/**
+ * Merge rows from other Excel files into a base Excel file.
+ * The output is a new Excel file with the merged content.
+ *
+ * @param {Object} data
+ * @param {Object[]} data.additions
+ * @param {Buffer} data.additions.file - The file to extract rows from
+ * @param {number} data.additions.sheetIndex - The 1-based index of the sheet to extract rows from
+ * @param {Buffer} data.baseFile - The base file to add rows to
+ * @param {number} [data.baseSheetIndex=1] - The 1-based index of the sheet in the base file to add rows to
+ * @param {number} [data.gap=1] - The number of empty rows to insert between each added section
+ * @param {string[]} [data.sheetNamesToRemove=[]] - The names of sheets to remove from the output file
+ * @param {number[]} [data.sheetsToRemove=[]] - The 1-based indices of sheets to remove from the output file
+ * @returns {Buffer} - The merged Excel file
+ */
+export function mergeSheetsToBaseFile(data) {
+const { additions = [], baseFile, baseSheetIndex = 1, gap = 1, sheetNamesToRemove = [], sheetsToRemove = [], } = data;
 const baseFiles = Zip.read(baseFile);
 const basePath = `xl/worksheets/sheet${baseSheetIndex}.xml`;
 if (!baseFiles[basePath]) {
@@ -22,7 +37,7 @@ export async function mergeSheetsToBaseFile(data) {
 const { mergeCells, rows } = extractRowsFromSheet(files[sheetPath]);
 const shiftedRows = shiftRowIndices(rows, currentRowOffset);
 const shiftedMergeCells = (mergeCells || []).map(cell => {
-const [start, end] = cell.ref.split(
+const [start, end] = cell.ref.split(":");
 if (!start || !end) {
 return cell;
 }
@@ -36,6 +51,22 @@ export async function mergeSheetsToBaseFile(data) {
 }
 const mergedXml = buildMergedSheet(baseFiles[basePath], allRows, allMergeCells);
 baseFiles[basePath] = mergedXml;
+for (const sheetIndex of sheetsToRemove) {
+const sheetPath = `xl/worksheets/sheet${sheetIndex}.xml`;
+delete baseFiles[sheetPath];
+if (baseFiles["xl/workbook.xml"]) {
+baseFiles["xl/workbook.xml"] = removeSheetFromWorkbook(baseFiles["xl/workbook.xml"], sheetIndex);
+}
+if (baseFiles["xl/_rels/workbook.xml.rels"]) {
+baseFiles["xl/_rels/workbook.xml.rels"] = removeSheetFromRels(baseFiles["xl/_rels/workbook.xml.rels"], sheetIndex);
+}
+if (baseFiles["[Content_Types].xml"]) {
+baseFiles["[Content_Types].xml"] = removeSheetFromContentTypes(baseFiles["[Content_Types].xml"], sheetIndex);
+}
+}
+for (const sheetName of sheetNamesToRemove) {
+removeSheetByName(baseFiles, sheetName);
+}
 const zip = Zip.create(baseFiles);
 return zip;
 }
@@ -89,3 +120,71 @@ function getMaxRowNumber(rows) {
 }
 return max;
 }
+/**
+ * Removes the specified sheet from the workbook (xl/workbook.xml).
+ * @param {string} xml - The workbook file contents as a string
+ * @param {number} sheetIndex - The 1-based index of the sheet to remove
+ * @returns {string} - The modified workbook file contents
+ */
+function removeSheetFromWorkbook(xml, sheetIndex) {
+return xml.replace(new RegExp(`<sheet[^>]+sheetId=["']${sheetIndex}["'][^>]*/>`, "g"), "");
+}
+/**
+ * Removes the specified sheet from the workbook relationships file (xl/_rels/workbook.xml.rels).
+ * @param {string} xml - The workbook relationships file contents as a string
+ * @param {number} sheetIndex - The 1-based index of the sheet to remove
+ * @returns {string} - The modified workbook relationships file contents
+ */
+function removeSheetFromRels(xml, sheetIndex) {
+return xml.replace(new RegExp(`<Relationship[^>]+Target=["']worksheets/sheet${sheetIndex}\\.xml["'][^>]*/>`, "g"), "");
+}
+/**
+ * Removes the specified sheet from the Content_Types.xml file.
+ * @param {string} xml - The Content_Types.xml file contents as a string
+ * @param {number} sheetIndex - The 1-based index of the sheet to remove
+ * @returns {string} - The modified Content_Types.xml file contents
+ */
+function removeSheetFromContentTypes(xml, sheetIndex) {
+return xml.replace(new RegExp(`<Override[^>]+PartName=["']/xl/worksheets/sheet${sheetIndex}\\.xml["'][^>]*/>`, "g"), "");
+}
+/**
+ * Removes a sheet from the Excel workbook by name.
+ * @param {Object.<string, string | Buffer>} files - The dictionary of files in the workbook.
+ * @param {string} sheetName - The name of the sheet to remove.
+ * @returns {void}
+ */
+function removeSheetByName(files, sheetName) {
+const workbookXml = files["xl/workbook.xml"];
+const relsXml = files["xl/_rels/workbook.xml.rels"];
+if (!workbookXml || !relsXml) {
+return;
+}
+const sheetMatch = workbookXml.match(new RegExp(`<sheet[^>]+name=["']${sheetName}["'][^>]*/>`));
+if (!sheetMatch) {
+return;
+}
+const sheetTag = sheetMatch[0];
+const sheetIdMatch = sheetTag.match(/sheetId=["'](\d+)["']/);
+const ridMatch = sheetTag.match(/r:id=["'](rId\d+)["']/);
+if (!sheetIdMatch || !ridMatch) {
+return;
+}
+const relId = ridMatch[1];
+const relMatch = relsXml.match(new RegExp(`<Relationship[^>]+Id=["']${relId}["'][^>]+Target=["']([^"']+)["'][^>]*/>`));
+if (!relMatch) {
+return;
+}
+const relTag = relMatch[0];
+const targetMatch = relTag.match(/Target=["']([^"']+)["']/);
+if (!targetMatch) {
+return;
+}
+const targetPath = `xl/${targetMatch[1]}`.replace(/\\/g, "/");
+delete files[targetPath];
+files["xl/workbook.xml"] = workbookXml.replace(sheetTag, "");
+files["xl/_rels/workbook.xml.rels"] = relsXml.replace(relTag, "");
+const contentTypes = files["[Content_Types].xml"];
+if (contentTypes) {
+files["[Content_Types].xml"] = contentTypes.replace(new RegExp(`<Override[^>]+PartName=["']/${targetPath}["'][^>]*/>`, "g"), "");
+}
+}
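Note: the removal helpers added above operate on the raw XML of the workbook parts with regular expressions rather than an XML parser. A minimal sketch of the same pattern, using an assumed xl/workbook.xml fragment (not taken from this package's fixtures):

  // Assumed sample of a <sheets> block; removing sheetId 2 drops the whole self-closing tag.
  const workbookXml =
      `<sheets>` +
      `<sheet name="Summary" sheetId="1" r:id="rId1"/>` +
      `<sheet name="Draft" sheetId="2" r:id="rId2"/>` +
      `</sheets>`;
  // Same regex shape as removeSheetFromWorkbook(xml, 2) in the diff above.
  const cleaned = workbookXml.replace(new RegExp(`<sheet[^>]+sheetId=["']2["'][^>]*/>`, "g"), "");
  // cleaned === '<sheets><sheet name="Summary" sheetId="1" r:id="rId1"/></sheets>'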
package/build/lib/xml/build-merged-sheet.js
CHANGED
@@ -17,15 +17,15 @@ export function buildMergedSheet(originalXml, mergedRows, mergeCells = []) {
 // - Opening <sheetData> tag with any attributes
 // - Any content between opening and closing tags (including line breaks)
 // - Closing </sheetData> tag
-let xmlData = originalXml.replace(/<sheetData[^>]*>[\s\S]*?<\/sheetData>/, `<sheetData>\n${mergedRows.join(
+let xmlData = originalXml.replace(/<sheetData[^>]*>[\s\S]*?<\/sheetData>/, `<sheetData>\n${mergedRows.join("\n")}\n</sheetData>`);
 // If merge cells were specified, add them after the sheetData section
 if (mergeCells.length > 0) {
 // Create mergeCells XML section:
 // - Includes count attribute with total number of merges
 // - Contains one mergeCell element for each merge definition
-const mergeCellsXml = `<mergeCells count="${mergeCells.length}">${mergeCells.map(mc => `<mergeCell ref="${mc.ref}"/>`).join(
+const mergeCellsXml = `<mergeCells count="${mergeCells.length}">${mergeCells.map(mc => `<mergeCell ref="${mc.ref}"/>`).join("")}</mergeCells>`;
 // Insert the mergeCells section immediately after the sheetData closing tag
-xmlData = xmlData.replace(
+xmlData = xmlData.replace("</sheetData>", `</sheetData>${mergeCellsXml}`);
 }
 // Return the fully reconstructed XML
 return xmlData;
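Note: to see the two string replacements above in isolation, here is a small sketch with an assumed worksheet fragment; it applies the same substitutions as buildMergedSheet.

  // Assumed minimal worksheet XML, not taken from the package's test data.
  const originalXml = `<worksheet><sheetData><row r="1"/></sheetData></worksheet>`;
  const mergedRows = [`<row r="1"><c r="A1" t="n"><v>1</v></c></row>`];
  const mergeCells = [{ ref: "A1:B1" }];

  let xmlData = originalXml.replace(/<sheetData[^>]*>[\s\S]*?<\/sheetData>/, `<sheetData>\n${mergedRows.join("\n")}\n</sheetData>`);
  const mergeCellsXml = `<mergeCells count="${mergeCells.length}">${mergeCells.map(mc => `<mergeCell ref="${mc.ref}"/>`).join("")}</mergeCells>`;
  xmlData = xmlData.replace("</sheetData>", `</sheetData>${mergeCellsXml}`);
  // xmlData now ends with ...</sheetData><mergeCells count="1"><mergeCell ref="A1:B1"/></mergeCells></worksheet>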
package/build/lib/xml/extract-rows-from-sheet.js
CHANGED
@@ -1,4 +1,4 @@
-import { extractXmlFromSheet } from
+import { extractXmlFromSheet } from "./extract-xml-from-sheet.js";
 /**
 * Parses a worksheet (either as Buffer or string) to extract row data,
 * last row number, and merge cell information from Excel XML format.
@@ -21,13 +21,13 @@ import { extractXmlFromSheet } from './extract-xml-from-sheet.js';
 */
 export function extractRowsFromSheet(sheet) {
 // Convert Buffer input to XML string if needed
-const xml = typeof sheet ===
+const xml = typeof sheet === "string" ? sheet : extractXmlFromSheet(sheet);
 // Extract the sheetData section containing all rows
 const sheetDataMatch = xml.match(/<sheetData[^>]*>([\s\S]*?)<\/sheetData>/);
 if (!sheetDataMatch) {
-throw new Error(
+throw new Error("sheetData not found in worksheet XML");
 }
-const sheetDataContent = sheetDataMatch[1] ||
+const sheetDataContent = sheetDataMatch[1] || "";
 // Extract all <row> elements using regex
 const rowMatches = [...sheetDataContent.matchAll(/<row[\s\S]*?<\/row>/g)];
 const rows = rowMatches.map(match => match[0]);
@@ -54,8 +54,8 @@ export function extractRowsFromSheet(sheet) {
 });
 }
 return {
-rows,
 lastRowNumber,
 mergeCells,
+rows,
 };
 }
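Note: the row extraction relies on a non-greedy regex over the sheetData contents; a short sketch with an assumed fragment:

  const sheetDataContent = `<row r="1"><c r="A1"/></row><row r="2"><c r="A2"/></row>`;
  const rows = [...sheetDataContent.matchAll(/<row[\s\S]*?<\/row>/g)].map(m => m[0]);
  // rows.length === 2, rows[0] === '<row r="1"><c r="A1"/></row>'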
package/build/lib/xml/extract-xml-from-sheet.d.ts.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"extract-xml-from-sheet.d.ts","sourceRoot":"","sources":["../../../src/lib/xml/extract-xml-from-sheet.ts"],"names":[],"mappings":";;AAEA;;;;;;;;;;;;GAYG;AACH,wBAAgB,mBAAmB,CAAC,MAAM,EAAE,MAAM,GAAG,MAAM,
+{"version":3,"file":"extract-xml-from-sheet.d.ts","sourceRoot":"","sources":["../../../src/lib/xml/extract-xml-from-sheet.ts"],"names":[],"mappings":";;AAEA;;;;;;;;;;;;GAYG;AACH,wBAAgB,mBAAmB,CAAC,MAAM,EAAE,MAAM,GAAG,MAAM,CAmC1D"}
package/build/lib/xml/extract-xml-from-sheet.js
CHANGED
@@ -1,4 +1,4 @@
-import { inflateRaw } from
+import { inflateRaw } from "pako";
 /**
 * Extracts and parses XML content from an Excel worksheet file (e.g., xl/worksheets/sheet1.xml).
 * Handles both compressed (raw deflate) and uncompressed (plain XML) formats.
@@ -14,38 +14,32 @@ import { inflateRaw } from 'pako';
 */
 export function extractXmlFromSheet(buffer) {
 if (!buffer || buffer.length === 0) {
-throw new Error(
+throw new Error("Empty buffer provided");
 }
 let xml;
 // Check if the buffer starts with an XML declaration (<?xml)
-const startsWithXml = buffer.subarray(0, 5).toString(
+const startsWithXml = buffer.subarray(0, 5).toString("utf8").trim().startsWith("<?xml");
 if (startsWithXml) {
 // Case 1: Already uncompressed XML - convert directly to string
-xml = buffer.toString(
+xml = buffer.toString("utf8");
 }
 else {
 // Case 2: Attempt to decompress as raw deflate data
-
-
-
-
-xml = inflated;
-}
-else {
-throw new Error('Decompressed data does not contain sheetData');
-}
+const inflated = inflateRaw(buffer, { to: "string" });
+// Validate the decompressed content contains worksheet data
+if (inflated && inflated.includes("<sheetData")) {
+xml = inflated;
 }
-
-
-// Continue to fallback attempt
+else {
+throw new Error("Decompressed data does not contain sheetData");
 }
 }
 // Fallback: If no XML obtained yet, try direct UTF-8 conversion
 if (!xml) {
-xml = buffer.toString(
+xml = buffer.toString("utf8");
 }
 // Sanitize XML by removing control characters (except tab, newline, carriage return)
 // This handles potential corruption from binary data or encoding issues
-xml = xml.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F]/g,
+xml = xml.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F]/g, "");
 return xml;
 }
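Note: the branch above distinguishes plain XML from raw-DEFLATE data by sniffing the first bytes for the "<?xml" declaration. A self-contained sketch of the same detection, assuming pako is installed and feeding it data compressed on the spot:

  import { Buffer } from "node:buffer";
  import { deflateRaw, inflateRaw } from "pako";

  const source = `<?xml version="1.0"?><worksheet><sheetData/></worksheet>`;
  const buffer = Buffer.from(deflateRaw(source)); // raw DEFLATE bytes, no zlib header

  // Same check as in the diff: plain XML starts with "<?xml" in its first five bytes.
  const startsWithXml = buffer.subarray(0, 5).toString("utf8").trim().startsWith("<?xml");
  const xml = startsWithXml ? buffer.toString("utf8") : inflateRaw(buffer, { to: "string" });
  // xml === source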
package/build/lib/xml/extract-xml-from-system-content.js
CHANGED
@@ -1,4 +1,4 @@
-import { inflateRaw } from
+import { inflateRaw } from "pako";
 /**
 * Extracts and decompresses XML content from Excel system files (e.g., workbook.xml, [Content_Types].xml).
 * Handles both compressed (raw DEFLATE) and uncompressed (plain XML) formats with comprehensive error handling.
@@ -20,17 +20,17 @@ export const extractXmlFromSystemContent = (buffer, name) => {
 }
 let xml;
 // Check for XML declaration in first 5 bytes (<?xml)
-const startsWithXml = buffer.subarray(0, 5).toString(
+const startsWithXml = buffer.subarray(0, 5).toString("utf8").trim().startsWith("<?xml");
 if (startsWithXml) {
 // Case 1: Already uncompressed XML - convert directly to string
-xml = buffer.toString(
+xml = buffer.toString("utf8");
 }
 else {
 // Case 2: Attempt DEFLATE decompression
 try {
-const inflated = inflateRaw(buffer, { to:
+const inflated = inflateRaw(buffer, { to: "string" });
 // Validate decompressed content contains XML declaration
-if (inflated && inflated.includes(
+if (inflated && inflated.includes("<?xml")) {
 xml = inflated;
 }
 else {
@@ -38,12 +38,12 @@ export const extractXmlFromSystemContent = (buffer, name) => {
 }
 }
 catch (error) {
-const message = error instanceof Error ? error.message :
+const message = error instanceof Error ? error.message : "Unknown error";
 throw new Error(`Failed to decompress ${name}: ${message}`);
 }
 }
 // Sanitize XML by removing illegal control characters (per XML 1.0 spec)
 // Preserves tabs (0x09), newlines (0x0A), and carriage returns (0x0D)
-xml = xml.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F]/g,
+xml = xml.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F]/g, "");
 return xml;
 };
package/build/lib/zip/constants.js
CHANGED
@@ -1,4 +1,4 @@
-import { Buffer } from
+import { Buffer } from "node:buffer";
 /**
 * ZIP file signature constants in Buffer format.
 * These magic numbers identify different sections of a ZIP file,
@@ -11,7 +11,7 @@ import { Buffer } from 'node:buffer';
 * Format: 'PK\01\02'
 * Found in the central directory that appears at the end of the ZIP file.
 */
-export const CENTRAL_DIR_HEADER_SIG = Buffer.from(
+export const CENTRAL_DIR_HEADER_SIG = Buffer.from("504b0102", "hex");
 /**
 * End of Central Directory Record signature (0x504b0506).
 * Marks the end of the central directory and contains global information
@@ -19,11 +19,11 @@ export const CENTRAL_DIR_HEADER_SIG = Buffer.from('504b0102', 'hex');
 * Format: 'PK\05\06'
 * This is the last record in a valid ZIP file.
 */
-export const END_OF_CENTRAL_DIR_SIG = Buffer.from(
+export const END_OF_CENTRAL_DIR_SIG = Buffer.from("504b0506", "hex");
 /**
 * Local File Header signature (0x504b0304).
 * Marks the beginning of a file entry within the ZIP archive.
 * Format: 'PK\03\04' (ASCII letters PK followed by version numbers)
 * Appears before each file's compressed data.
 */
-export const LOCAL_FILE_HEADER_SIG = Buffer.from(
+export const LOCAL_FILE_HEADER_SIG = Buffer.from("504b0304", "hex");
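Note: these are the standard PKZIP magic numbers ("PK" followed by record-type bytes). Since .xlsx files are ZIP archives, the first four bytes of a workbook normally match the local file header signature; a quick, assumed-path illustration:

  import { Buffer } from "node:buffer";
  import { readFileSync } from "node:fs";

  // "PK\x03\x04": precedes every stored file entry in a ZIP archive.
  const LOCAL_FILE_HEADER_SIG = Buffer.from("504b0304", "hex");

  const archive = readFileSync("template.xlsx"); // assumed path, for illustration only
  const looksLikeZip = archive.subarray(0, 4).equals(LOCAL_FILE_HEADER_SIG);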
package/build/lib/zip/create.js
CHANGED
@@ -1,7 +1,7 @@
-import { Buffer } from
-import { deflateRawSync } from
-import {
-import {
+import { Buffer } from "node:buffer";
+import { deflateRawSync } from "node:zlib";
+import { crc32, dosTime, toBytes } from "./utils.js";
+import { CENTRAL_DIR_HEADER_SIG, END_OF_CENTRAL_DIR_SIG, LOCAL_FILE_HEADER_SIG, } from "./constants.js";
 /**
 * Creates a ZIP archive from a collection of files.
 *
@@ -13,11 +13,11 @@ export function create(files) {
 const centralDirectory = [];
 let offset = 0;
 for (const [filename, rawContent] of Object.entries(files).sort(([a], [b]) => a.localeCompare(b))) {
-if (filename.includes(
+if (filename.includes("..")) {
 throw new Error(`Invalid filename: ${filename}`);
 }
 const content = Buffer.isBuffer(rawContent) ? rawContent : Buffer.from(rawContent);
-const fileNameBuf = Buffer.from(filename,
+const fileNameBuf = Buffer.from(filename, "utf8");
 const modTime = dosTime(new Date());
 const crc = crc32(content);
 const compressed = deflateRawSync(content);
package/build/lib/zip/index.d.ts
CHANGED
@@ -1,3 +1,3 @@
-export * from
-export * from
+export * from "./create.js";
+export * from "./read.js";
 //# sourceMappingURL=index.d.ts.map
package/build/lib/zip/index.js
CHANGED
@@ -1,2 +1,2 @@
-export * from
-export * from
+export * from "./create.js";
+export * from "./read.js";
package/build/lib/zip/read.d.ts.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"read.d.ts","sourceRoot":"","sources":["../../../src/lib/zip/read.ts"],"names":[],"mappings":";;AAEA;;;;;;GAMG;AAEH,wBAAgB,IAAI,CAAC,MAAM,EAAE,MAAM,GAAG;IAAE,CAAC,CAAC,EAAE,MAAM,GAAG,MAAM,
+{"version":3,"file":"read.d.ts","sourceRoot":"","sources":["../../../src/lib/zip/read.ts"],"names":[],"mappings":";;AAEA;;;;;;GAMG;AAEH,wBAAgB,IAAI,CAAC,MAAM,EAAE,MAAM,GAAG;IAAE,CAAC,CAAC,EAAE,MAAM,GAAG,MAAM,CAAA;CAAE,CA+C5D"}
package/build/lib/zip/read.js
CHANGED
@@ -1,4 +1,4 @@
-import { inflateRawSync } from
+import { inflateRawSync } from "node:zlib";
 /**
 * Parses a ZIP archive from a buffer and extracts the files within.
 *
@@ -30,7 +30,7 @@ export function read(buffer) {
 nextOffset = buffer.length;
 }
 const compressedData = buffer.subarray(dataStart, nextOffset);
-let content =
+let content = "";
 try {
 if (compressionMethod === 0) {
 content = compressedData.toString();
@@ -43,7 +43,7 @@ export function read(buffer) {
 }
 }
 catch (error) {
-const message = error instanceof Error ? error.message :
+const message = error instanceof Error ? error.message : "Unknown error";
 throw new Error(`Error unpacking file ${fileName}: ${message}`);
 }
 files[fileName] = content;
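Note: ZIP entries are in practice either stored (compression method 0) or raw-deflated (method 8), which is why node:zlib's inflateRawSync is the only decompressor imported here; the method-8 branch is implied by that import rather than shown in this hunk. A minimal sketch of the two cases, with data compressed on the spot rather than read from a real archive:

  import { Buffer } from "node:buffer";
  import { deflateRawSync, inflateRawSync } from "node:zlib";

  const original = Buffer.from(`<?xml version="1.0"?><workbook/>`, "utf8");
  const compressedData = deflateRawSync(original); // what a method-8 entry stores

  const compressionMethod: number = 8; // 0 = stored, 8 = DEFLATE
  const content = compressionMethod === 0
      ? compressedData.toString()
      : inflateRawSync(compressedData).toString();
  // content === original.toString()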
package/build/lib/zip/utils.d.ts
CHANGED
@@ -1,6 +1,6 @@
 /// <reference types="node" />
 /// <reference types="node" />
-import { Buffer } from
+import { Buffer } from "buffer";
 /**
 * Computes a CRC-32 checksum for the given Buffer using the standard IEEE 802.3 polynomial.
 * This implementation uses a precomputed lookup table for optimal performance.
package/build/lib/zip/utils.js
CHANGED
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
 "name": "@js-ak/excel-toolbox",
-"version": "1.0.0",
+"version": "1.1.0",
 "description": "excel-toolbox",
 "publishConfig": {
 "access": "public",
@@ -42,6 +42,7 @@
 "@semantic-release/github": "10.0.6",
 "@semantic-release/npm": "12.0.1",
 "@semantic-release/release-notes-generator": "14.0.0",
+"@stylistic/eslint-plugin-ts": "4.2.0",
 "@types/node": "22.14.0",
 "@types/pako": "2.0.3",
 "eslint": "9.24.0",