stegdoc 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +214 -0
- package/bootstrap.js +33 -0
- package/package.json +60 -0
- package/src/commands/decode.js +201 -0
- package/src/commands/encode.js +346 -0
- package/src/commands/info.js +113 -0
- package/src/commands/verify.js +169 -0
- package/src/index.js +87 -0
- package/src/lib/compression.js +97 -0
- package/src/lib/crypto.js +118 -0
- package/src/lib/decoy-generator.js +306 -0
- package/src/lib/docx-handler.js +161 -0
- package/src/lib/file-handler.js +113 -0
- package/src/lib/file-utils.js +150 -0
- package/src/lib/interactive.js +190 -0
- package/src/lib/metadata.js +111 -0
- package/src/lib/utils.js +227 -0
- package/src/lib/xlsx-handler.js +359 -0
- package/src/lib/xml-utils.js +115 -0
|
@@ -0,0 +1,161 @@
|
|
|
1
|
+
const { Document, Paragraph, TextRun, Packer } = require('docx');
|
|
2
|
+
const fs = require('fs');
|
|
3
|
+
const path = require('path');
|
|
4
|
+
const { serializeMetadata, parseMetadata } = require('./metadata');
|
|
5
|
+
const { parseXmlFromZip, ensureArray, extractTextContent } = require('./xml-utils');
|
|
6
|
+
|
|
7
|
+
/**
 * Create a DOCX file whose body carries three paragraphs: a near-invisible
 * metadata marker, a "---" separator, and the base64 payload itself.
 * @param {object} options - Options for creating the DOCX
 * @param {string} options.base64Content - Base64 payload to embed
 * @param {object} options.metadata - Metadata object (serialized to JSON)
 * @param {string} options.outputPath - Destination file path
 * @returns {Promise<string>} The path that was written
 */
async function createDocxWithBase64(options) {
  const { base64Content, metadata, outputPath } = options;

  // Metadata travels as JSON text in a 1 half-point run: present in the
  // document text (so readDocxBase64 can find it) but effectively invisible.
  const metadataParagraph = new Paragraph({
    children: [
      new TextRun({
        text: `WHITENER_METADATA:${serializeMetadata(metadata)}`,
        size: 1, // Very small font
      }),
    ],
  });

  // "---" marks where the metadata ends and the payload begins when the
  // document text is re-extracted.
  const separatorParagraph = new Paragraph({
    children: [
      new TextRun({
        text: '---',
        break: 1,
      }),
    ],
  });

  // The payload, rendered small and monospaced.
  const payloadParagraph = new Paragraph({
    children: [
      new TextRun({
        text: base64Content,
        font: 'Courier New', // Monospace for base64
        size: 16, // 8pt font
      }),
    ],
  });

  const doc = new Document({
    sections: [
      {
        properties: {},
        children: [metadataParagraph, separatorParagraph, payloadParagraph],
      },
    ],
  });

  const buffer = await Packer.toBuffer(doc);

  // Create the destination directory on demand before writing.
  const outputDir = path.dirname(outputPath);
  if (!fs.existsSync(outputDir)) {
    fs.mkdirSync(outputDir, { recursive: true });
  }
  fs.writeFileSync(outputPath, buffer);

  return outputPath;
}
|
|
74
|
+
|
|
75
|
+
/**
 * Read a whitener-encoded DOCX file and extract its base64 payload and
 * metadata. XML parsing is namespace-agnostic (handles w:, ns0:, ns1:, ...).
 * @param {string} docxPath - Path to the DOCX file
 * @returns {Promise<{base64Content: string, metadata: object}>}
 * @throws {Error} If the file is missing, unparsable, or not whitener-encoded
 */
async function readDocxBase64(docxPath) {
  if (!fs.existsSync(docxPath)) {
    throw new Error(`DOCX file not found: ${docxPath}`);
  }

  try {
    const docParsed = parseXmlFromZip(docxPath, 'word/document.xml');
    if (!docParsed) {
      throw new Error('Could not find document.xml in DOCX file');
    }

    // Full document text, expected shape: "WHITENER_METADATA:<json>---<base64>"
    const fullText = extractAllText(docParsed);

    const marker = 'WHITENER_METADATA:';
    const markerAt = fullText.indexOf(marker);
    if (markerAt === -1) {
      throw new Error('No metadata found in DOCX file. This may not be a whitener-encoded file.');
    }

    // The "---" separator paragraph follows the metadata JSON.
    const separatorAt = fullText.indexOf('---', markerAt);
    if (separatorAt === -1) {
      throw new Error('Invalid file format: separator not found');
    }

    // Metadata JSON sits between the marker and the separator.
    const metadata = parseMetadata(
      fullText.substring(markerAt + marker.length, separatorAt).trim()
    );
    // Everything after the separator is the base64 payload.
    const base64Content = fullText.substring(separatorAt + 3).trim();

    return { base64Content, metadata };
  } catch (error) {
    // Uniform error surface for callers; the inner message is preserved.
    throw new Error(`Failed to read DOCX file: ${error.message}`);
  }
}
|
|
128
|
+
|
|
129
|
+
/**
 * Collect all text runs from a parsed document.xml into one string.
 * Navigates document > body > p[] > r[] > t.
 * @param {object} docParsed - Parsed document.xml
 * @returns {string} Concatenated text content ('' when no body is present)
 */
function extractAllText(docParsed) {
  const body = docParsed?.document?.body;
  if (!body) return '';

  const pieces = [];
  for (const paragraph of ensureArray(body.p)) {
    for (const run of ensureArray(paragraph.r)) {
      // Runs without a 't' node contribute no text.
      if (run.t !== undefined) {
        pieces.push(extractTextContent(run.t));
      }
    }
  }

  return pieces.join('');
}
|
|
157
|
+
|
|
158
|
+
// Public API: DOCX write/read round-trip for base64 payloads.
module.exports = {
  createDocxWithBase64,
  readDocxBase64,
};
|
|
@@ -0,0 +1,113 @@
|
|
|
1
|
+
const fs = require('fs');
|
|
2
|
+
const path = require('path');
|
|
3
|
+
|
|
4
|
+
/**
 * Read a file from disk and return its base64 encoding plus basic file info.
 * @param {string} filePath - Path to the file
 * @returns {{base64: string, filename: string, extension: string, size: number}}
 * @throws {Error} If the path does not exist or is not a regular file
 */
function encodeFileToBase64(filePath) {
  if (!fs.existsSync(filePath)) {
    throw new Error(`File not found: ${filePath}`);
  }

  const stats = fs.statSync(filePath);
  if (!stats.isFile()) {
    throw new Error(`Path is not a file: ${filePath}`);
  }

  return {
    base64: fs.readFileSync(filePath).toString('base64'),
    filename: path.basename(filePath),
    extension: path.extname(filePath),
    size: stats.size,
  };
}
|
|
32
|
+
|
|
33
|
+
/**
 * Decode a base64 string and write the resulting bytes to disk, creating
 * the parent directory (recursively) if it does not yet exist.
 * @param {string} base64 - Base64 encoded string
 * @param {string} outputPath - Output file path
 */
function decodeBase64ToFile(base64, outputPath) {
  const parentDir = path.dirname(outputPath);
  if (!fs.existsSync(parentDir)) {
    fs.mkdirSync(parentDir, { recursive: true });
  }

  fs.writeFileSync(outputPath, Buffer.from(base64, 'base64'));
}
|
|
49
|
+
|
|
50
|
+
/**
 * Split a base64 string into fixed-size chunks.
 * @param {string} base64 - Base64 string to split
 * @param {number} chunkSizeBytes - Maximum size per chunk in bytes (must be > 0)
 * @returns {Array<string>} Chunks in order; the last may be shorter
 * @throws {Error} If chunkSizeBytes is not a positive number
 */
function splitBase64(base64, chunkSizeBytes) {
  // Guard: a zero, negative, or NaN chunk size would make the scan below
  // never advance and loop forever.
  if (!(chunkSizeBytes > 0)) {
    throw new Error(`Invalid chunk size: ${chunkSizeBytes}`);
  }

  const chunks = [];
  for (let offset = 0; offset < base64.length; offset += chunkSizeBytes) {
    chunks.push(base64.slice(offset, offset + chunkSizeBytes));
  }

  return chunks;
}
|
|
67
|
+
|
|
68
|
+
/**
 * Concatenate base64 chunks (in array order) back into a single string.
 * @param {Array<string>} chunks - Array of base64 chunks
 * @returns {string} Merged base64 string
 */
function mergeBase64Chunks(chunks) {
  let merged = '';
  for (const chunk of chunks) {
    merged += chunk;
  }
  return merged;
}
|
|
76
|
+
|
|
77
|
+
/**
 * Calculate how many chunks a file's base64 encoding will require.
 * @param {number} fileSize - File size in bytes
 * @param {number} chunkSizeBytes - Chunk size in bytes
 * @returns {number} Number of chunks needed
 */
function calculateChunkCount(fileSize, chunkSizeBytes) {
  // Base64 encodes every 3-byte group (with '=' padding) as exactly 4
  // characters, so the encoded length is ceil(n / 3) * 4. The previous
  // ceil(n * 4 / 3) under-estimated whenever padding applies (e.g. 4 bytes
  // encode to 8 chars, not 6), which could under-count the chunks needed.
  const base64Size = Math.ceil(fileSize / 3) * 4;
  return Math.ceil(base64Size / chunkSizeBytes);
}
|
|
88
|
+
|
|
89
|
+
/**
 * Check whether a directory path can be written to, creating the directory
 * (recursively) first when it does not yet exist.
 * @param {string} dirPath - Directory path to check
 * @returns {boolean} True if the path exists (or was created) and is writable
 */
function isDirectoryWritable(dirPath) {
  try {
    const missing = !fs.existsSync(dirPath);
    if (missing) {
      fs.mkdirSync(dirPath, { recursive: true });
    }
    fs.accessSync(dirPath, fs.constants.W_OK);
    return true;
  } catch {
    // Either creation or the access check failed: report "not writable".
    return false;
  }
}
|
|
105
|
+
|
|
106
|
+
// Public API: base64 file encoding/decoding plus chunk helpers.
module.exports = {
  encodeFileToBase64,
  decodeBase64ToFile,
  splitBase64,
  mergeBase64Chunks,
  calculateChunkCount,
  isDirectoryWritable,
};
|
|
@@ -0,0 +1,150 @@
|
|
|
1
|
+
const fs = require('fs');
|
|
2
|
+
const path = require('path');
|
|
3
|
+
const { parseMetadata } = require('./metadata');
|
|
4
|
+
const { parseFilename } = require('./utils');
|
|
5
|
+
|
|
6
|
+
/**
 * Pull the encrypted payload, encryption metadata, and file metadata out of
 * a readFile result, according to the container format.
 * @param {object} readResult - Result from readFile
 * @param {string} format - File format ('xlsx' or 'docx')
 * @returns {object} { encryptedContent, encryptionMeta, metadata }
 */
function extractContent(readResult, format) {
  // XLSX readers already return the three pieces separately; the metadata
  // arrives as JSON text and is parsed here.
  if (format === 'xlsx') {
    return {
      encryptedContent: readResult.base64Content,
      encryptionMeta: readResult.encryptionMeta,
      metadata: parseMetadata(readResult.metadata),
    };
  }

  // DOCX (v2+) packs "<encryptionMeta>|||<payload>" into one content string.
  const { base64Content, metadata } = readResult;
  const pieces = base64Content.split('|||');

  if (pieces.length > 1) {
    return {
      encryptedContent: pieces[1],
      encryptionMeta: pieces[0],
      metadata,
    };
  }

  // Legacy (pre-encryption) DOCX: the content is the payload itself.
  return {
    encryptedContent: base64Content,
    encryptionMeta: null,
    metadata,
  };
}
|
|
41
|
+
|
|
42
|
+
/**
 * Locate every part of a multi-part encoded file inside a directory.
 * Matches either the new "realistic" filename scheme (by a report id derived
 * from the hash) or the legacy hex scheme (by a truncated hash prefix).
 * @param {string} dirPath - Directory to search
 * @param {string} hash - Original hash from metadata
 * @param {string} format - File format ('xlsx' or 'docx')
 * @param {number} [expectedParts] - Expected total parts (optional, for validation)
 * @returns {Array<{path: string, partNumber: number, filename: string}>} Parts sorted by part number
 * @throws {Error} If expectedParts is given, all parts are present, but numbering has gaps
 */
function findMultiPartFiles(dirPath, hash, format, expectedParts = null) {
  const ext = format === 'docx' ? '.docx' : '.xlsx';
  // New-style names embed the last 4 hash chars (uppercased) as a report id;
  // this is deterministic and independent of the current date.
  const expectedReportId = hash.slice(-4).toUpperCase();
  // Legacy hex names embed the first 16 chars of the hash.
  const legacyBaseHash = hash.length >= 16 ? hash.slice(0, 16) : hash;

  let parts = [];

  for (const filename of fs.readdirSync(dirPath)) {
    if (!filename.toLowerCase().endsWith(ext)) continue;

    const parsed = parseFilename(filename);
    if (!parsed || parsed.partNumber === null) continue;

    if (parsed.reportId) {
      // Realistic filename format: match on the report id.
      if (parsed.reportId === expectedReportId) {
        parts.push({
          path: path.join(dirPath, filename),
          partNumber: parsed.partNumber,
          filename,
          dateStr: parsed.dateStr,
          timeStr: parsed.timeStr,
        });
      }
    } else if (parsed.baseHash === legacyBaseHash) {
      // Legacy hex format: match on the truncated base hash.
      parts.push({
        path: path.join(dirPath, filename),
        partNumber: parsed.partNumber,
        filename,
      });
    }
  }

  parts.sort((left, right) => left.partNumber - right.partNumber);

  // Realistic names can collide on reportId across distinct file sets; keep
  // only the set sharing the first part's date/time stamp.
  if (parts.length > 0 && parts[0].dateStr) {
    const { dateStr, timeStr } = parts[0];
    parts = parts.filter((p) => p.dateStr === dateStr && p.timeStr === timeStr);
  }

  // When the caller knows the total and the count matches, require a gapless
  // 1..N numbering.
  if (expectedParts !== null && parts.length === expectedParts) {
    parts.forEach((part, index) => {
      if (part.partNumber !== index + 1) {
        throw new Error(`Missing part ${index + 1}. Parts must be sequential.`);
      }
    });
  }

  return parts;
}
|
|
118
|
+
|
|
119
|
+
/**
 * Report whether a directory is writable, creating it (recursively) first
 * when it does not yet exist.
 * @param {string} dirPath - Directory path to check
 * @returns {boolean} True if writable
 */
function isDirectoryWritable(dirPath) {
  // existsSync never throws, so it can sit outside the try block.
  const alreadyThere = fs.existsSync(dirPath);
  try {
    if (!alreadyThere) {
      fs.mkdirSync(dirPath, { recursive: true });
    }
    fs.accessSync(dirPath, fs.constants.W_OK);
  } catch {
    return false;
  }
  return true;
}
|
|
135
|
+
|
|
136
|
+
/**
 * Fold an ordered array of base64 chunks back into one string.
 * @param {Array<string>} chunks - Array of base64 chunks
 * @returns {string} Merged base64 string
 */
function mergeBase64Chunks(chunks) {
  return chunks.reduce((merged, part) => merged + part, '');
}
|
|
144
|
+
|
|
145
|
+
// Public API: multi-part file discovery and decode-side content extraction.
module.exports = {
  extractContent,
  findMultiPartFiles,
  isDirectoryWritable,
  mergeBase64Chunks,
};
|
|
@@ -0,0 +1,190 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Interactive prompts for encode/decode commands
|
|
3
|
+
*/
|
|
4
|
+
|
|
5
|
+
/**
 * Decide whether a command should drop into interactive prompting.
 * Skipped whenever --yes or --quiet is set. For 'encode' it runs only when
 * neither a non-default format nor a password was given; 'decode' never
 * prompts here (its password prompt is handled in the decode command).
 * @param {object} options - Command options
 * @param {string} command - Command name ('encode' or 'decode')
 * @returns {boolean}
 */
function shouldRunInteractive(options, command) {
  // Explicit non-interactive flags always win.
  if (options.yes || options.quiet) {
    return false;
  }

  // Only 'encode' ever triggers interactive mode.
  if (command !== 'encode') {
    return false;
  }

  // 'xlsx' is the default format, so passing it counts as "no format given".
  const formatProvided = Boolean(options.format) && options.format !== 'xlsx';
  return !formatProvided && !options.password;
}
|
|
33
|
+
|
|
34
|
+
/**
 * Interactively collect encode options: output format, optional encryption
 * password (entered twice and re-prompted until both entries match), and a
 * chunk-size specification.
 * @param {string} filename - Input filename for context (not referenced here)
 * @returns {Promise<{format: string, password: (string|undefined), chunkSize: string}>}
 */
async function promptEncodeOptions(filename) {
  // inquirer is ESM-only, so it is loaded dynamically from CommonJS.
  const { default: inquirer } = await import('inquirer');

  console.log();

  const { format, useEncryption } = await inquirer.prompt([
    {
      type: 'list',
      name: 'format',
      message: 'Select output format:',
      choices: [
        { name: 'XLSX (Excel) - recommended, better hiding spots', value: 'xlsx' },
        { name: 'DOCX (Word) - fallback option', value: 'docx' },
      ],
      default: 'xlsx',
    },
    {
      type: 'confirm',
      name: 'useEncryption',
      message: 'Encrypt the file with a password?',
      default: true,
    },
  ]);

  // Password stays undefined when the user declines encryption.
  const password = useEncryption
    ? await promptConfirmedPassword(inquirer)
    : undefined;

  const { chunkSize } = await inquirer.prompt([
    {
      type: 'input',
      name: 'chunkSize',
      message: 'Chunk size or number of parts (e.g., 5MB, "3 parts", or "max"):',
      default: '5MB',
      validate: validateChunkSizeInput,
    },
  ]);

  return { format, password, chunkSize };
}

/**
 * Prompt for a password (min 8 chars) plus a confirmation, looping until
 * the two entries match.
 * @param {object} inquirer - Loaded inquirer module
 * @returns {Promise<string>} The confirmed password
 */
async function promptConfirmedPassword(inquirer) {
  for (;;) {
    const { password } = await inquirer.prompt([
      {
        type: 'password',
        name: 'password',
        message: 'Enter encryption password:',
        mask: '*',
        validate: (input) =>
          input.length >= 8 || 'Password must be at least 8 characters',
      },
    ]);

    const { passwordConfirm } = await inquirer.prompt([
      {
        type: 'password',
        name: 'passwordConfirm',
        message: 'Confirm password:',
        mask: '*',
      },
    ]);

    if (password === passwordConfirm) {
      return password;
    }
    console.log('Passwords do not match. Please try again.\n');
  }
}

/**
 * Validate a chunk-size answer: "0"/"max"/"single"/"none" (no splitting),
 * "N parts", or a size such as "5MB".
 * @param {string} input - Raw prompt answer
 * @returns {(boolean|string)} true when valid, otherwise an error message
 */
function validateChunkSizeInput(input) {
  const trimmed = input.trim().toLowerCase();

  // Special values mean "do not split".
  if (['0', 'max', 'single', 'none'].includes(trimmed)) {
    return true;
  }

  // "X parts" format.
  if (/^\d+\s*parts?$/i.test(trimmed)) {
    return parseInt(trimmed, 10) >= 1 || 'Number of parts must be at least 1';
  }

  // Plain size format, optionally with a unit suffix.
  if (!/^\d+(\.\d+)?\s*(B|KB|MB|GB)?$/i.test(trimmed)) {
    return 'Use "5MB", "3 parts", or "max" for no splitting';
  }
  return true;
}
|
|
139
|
+
|
|
140
|
+
/**
 * Prompt for the password of an encrypted file being decoded.
 * Empty input is rejected by the prompt validator.
 * @returns {Promise<string>} Password
 */
async function promptPassword() {
  // inquirer is ESM-only, so it is loaded dynamically from CommonJS.
  const { default: inquirer } = await import('inquirer');

  const answers = await inquirer.prompt([
    {
      type: 'password',
      name: 'password',
      message: 'Enter decryption password:',
      mask: '*',
      validate: (input) =>
        input ? true : 'Password is required for encrypted files',
    },
  ]);

  return answers.password;
}
|
|
164
|
+
|
|
165
|
+
/**
 * Ask the user to confirm overwriting an existing file (defaults to "no").
 * @param {string} filePath - Path of the file that would be overwritten
 * @returns {Promise<boolean>} True if the user confirms
 */
async function promptOverwrite(filePath) {
  // inquirer is ESM-only, so it is loaded dynamically from CommonJS.
  const { default: inquirer } = await import('inquirer');

  const answers = await inquirer.prompt([
    {
      type: 'confirm',
      name: 'confirm',
      message: `File "${filePath}" already exists. Overwrite?`,
      default: false,
    },
  ]);

  return answers.confirm;
}
|
|
184
|
+
|
|
185
|
+
// Public API: interactive prompt helpers for the encode/decode commands.
module.exports = {
  shouldRunInteractive,
  promptEncodeOptions,
  promptPassword,
  promptOverwrite,
};
|