stegdoc 4.0.0 → 5.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -21
- package/README.md +214 -214
- package/package.json +59 -59
- package/src/commands/decode.js +485 -343
- package/src/commands/encode.js +567 -449
- package/src/commands/info.js +118 -114
- package/src/commands/verify.js +207 -204
- package/src/index.js +89 -87
- package/src/lib/compression.js +177 -115
- package/src/lib/crypto.js +172 -172
- package/src/lib/decoy-generator.js +306 -306
- package/src/lib/docx-handler.js +587 -161
- package/src/lib/docx-templates.js +355 -0
- package/src/lib/file-handler.js +113 -113
- package/src/lib/file-utils.js +160 -150
- package/src/lib/interactive.js +190 -190
- package/src/lib/log-generator.js +764 -0
- package/src/lib/metadata.js +151 -122
- package/src/lib/streams.js +197 -197
- package/src/lib/utils.js +227 -227
- package/src/lib/xlsx-handler.js +597 -416
- package/src/lib/xml-utils.js +115 -115
package/src/lib/file-utils.js
CHANGED
|
@@ -1,150 +1,160 @@
|
|
|
1
|
-
const fs = require('fs');
|
|
2
|
-
const path = require('path');
|
|
3
|
-
const { parseMetadata } = require('./metadata');
|
|
4
|
-
const { parseFilename } = require('./utils');
|
|
5
|
-
|
|
6
|
-
/**
|
|
7
|
-
* Extract content and metadata based on format
|
|
8
|
-
* @param {object} readResult - Result from readFile
|
|
9
|
-
* @param {string} format - File format ('xlsx' or 'docx')
|
|
10
|
-
* @returns {object} { encryptedContent, encryptionMeta, metadata }
|
|
11
|
-
*/
|
|
12
|
-
function extractContent(readResult, format) {
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
|
|
17
|
-
|
|
18
|
-
|
|
19
|
-
|
|
20
|
-
|
|
21
|
-
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
|
|
27
|
-
|
|
28
|
-
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
}
|
|
32
|
-
|
|
33
|
-
//
|
|
34
|
-
|
|
35
|
-
encryptedContent
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
}
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
|
|
65
|
-
|
|
66
|
-
|
|
67
|
-
|
|
68
|
-
|
|
69
|
-
|
|
70
|
-
|
|
71
|
-
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
|
|
78
|
-
|
|
79
|
-
|
|
80
|
-
|
|
81
|
-
|
|
82
|
-
|
|
83
|
-
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
|
|
90
|
-
|
|
91
|
-
|
|
92
|
-
|
|
93
|
-
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
|
|
97
|
-
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
|
|
101
|
-
|
|
102
|
-
|
|
103
|
-
|
|
104
|
-
|
|
105
|
-
|
|
106
|
-
|
|
107
|
-
|
|
108
|
-
|
|
109
|
-
|
|
110
|
-
|
|
111
|
-
|
|
112
|
-
|
|
113
|
-
|
|
114
|
-
|
|
115
|
-
|
|
116
|
-
|
|
117
|
-
|
|
118
|
-
|
|
119
|
-
|
|
120
|
-
|
|
121
|
-
|
|
122
|
-
|
|
123
|
-
|
|
124
|
-
|
|
125
|
-
|
|
126
|
-
|
|
127
|
-
|
|
128
|
-
|
|
129
|
-
|
|
130
|
-
|
|
131
|
-
|
|
132
|
-
|
|
133
|
-
|
|
134
|
-
|
|
135
|
-
|
|
136
|
-
|
|
137
|
-
|
|
138
|
-
|
|
139
|
-
|
|
140
|
-
|
|
141
|
-
|
|
142
|
-
|
|
143
|
-
}
|
|
144
|
-
|
|
145
|
-
|
|
146
|
-
|
|
147
|
-
|
|
148
|
-
|
|
149
|
-
|
|
150
|
-
|
|
1
|
+
const fs = require('fs');
|
|
2
|
+
const path = require('path');
|
|
3
|
+
const { parseMetadata } = require('./metadata');
|
|
4
|
+
const { parseFilename } = require('./utils');
|
|
5
|
+
|
|
6
|
+
/**
 * Extract content and metadata based on format
 * @param {object} readResult - Result from readFile
 * @param {string} format - File format ('xlsx' or 'docx')
 * @returns {object} { encryptedContent, encryptionMeta, metadata }
 */
function extractContent(readResult, format) {
  // v5 log-embed format returns metadata already parsed
  if (readResult.formatVersion === 'v5') {
    const { encryptionMeta, metadata, payloadBuffer } = readResult;
    return {
      encryptedContent: null, // v5 uses payloadBuffer instead
      encryptionMeta,
      metadata,
      payloadBuffer,
    };
  }

  if (format === 'xlsx') {
    return {
      encryptedContent: readResult.base64Content,
      encryptionMeta: readResult.encryptionMeta,
      metadata: parseMetadata(readResult.metadata),
    };
  }

  // DOCX: encryption meta is embedded in content with ||| separator
  const { base64Content, metadata } = readResult;

  // v2+ encrypted files carry "encryptionMeta|||encryptedContent"
  if (base64Content.includes('|||')) {
    const [encryptionMeta, encryptedContent] = base64Content.split('|||');
    return { encryptedContent, encryptionMeta, metadata };
  }

  // Legacy unencrypted DOCX: whole payload is the content, no encryption meta
  return {
    encryptedContent: base64Content,
    encryptionMeta: null,
    metadata,
  };
}
|
|
51
|
+
|
|
52
|
+
/**
 * Find all parts of a multi-part file in a directory
 * @param {string} dirPath - Directory to search
 * @param {string} hash - Original hash from metadata
 * @param {string} format - File format ('xlsx' or 'docx')
 * @param {number} [expectedParts] - Expected total parts (optional, for validation)
 * @returns {Array<{path: string, partNumber: number, filename: string}>} Array of parts sorted by part number
 */
function findMultiPartFiles(dirPath, hash, format, expectedParts = null) {
  const extension = format === 'docx' ? '.docx' : '.xlsx';
  // Legacy hex filenames used the first 16 chars of the hash as the base
  const legacyBaseHash = hash.length >= 16 ? hash.slice(0, 16) : hash;
  // Realistic filenames embed the last 4 hash chars as a deterministic report id
  // (does not depend on the current date)
  const expectedReportId = hash.slice(-4).toUpperCase();

  const parts = [];
  for (const filename of fs.readdirSync(dirPath)) {
    if (!filename.toLowerCase().endsWith(extension)) continue;

    const parsed = parseFilename(filename);
    if (!parsed || parsed.partNumber === null) continue;

    const fullPath = path.join(dirPath, filename);
    if (parsed.reportId) {
      // New realistic format — match by reportId
      if (parsed.reportId === expectedReportId) {
        parts.push({
          path: fullPath,
          partNumber: parsed.partNumber,
          filename,
          dateStr: parsed.dateStr,
          timeStr: parsed.timeStr,
        });
      }
    } else if (parsed.baseHash === legacyBaseHash) {
      // Legacy hex format — match by base hash
      parts.push({ path: fullPath, partNumber: parsed.partNumber, filename });
    }
  }

  // Sort by part number
  parts.sort((a, b) => a.partNumber - b.partNumber);

  // For realistic filenames, keep only parts that share the first part's
  // date/time stamp — guards against distinct file sets colliding on reportId.
  if (parts.length > 0 && parts[0].dateStr) {
    const { dateStr: refDate, timeStr: refTime } = parts[0];
    const sameSet = parts.filter((p) => p.dateStr === refDate && p.timeStr === refTime);
    if (sameSet.length !== parts.length) {
      parts.length = 0;
      parts.push(...sameSet);
    }
  }

  // When the expected count is met, verify parts run 1..N with no gaps
  if (expectedParts !== null && parts.length === expectedParts) {
    parts.forEach((part, index) => {
      if (part.partNumber !== index + 1) {
        throw new Error(`Missing part ${index + 1}. Parts must be sequential.`);
      }
    });
  }

  return parts;
}
|
|
128
|
+
|
|
129
|
+
/**
 * Check if a directory is writable.
 * Note: creates the directory (recursively) if it does not exist yet.
 * @param {string} dirPath - Directory path to check
 * @returns {boolean} True if writable
 */
function isDirectoryWritable(dirPath) {
  let writable = true;
  try {
    if (!fs.existsSync(dirPath)) {
      fs.mkdirSync(dirPath, { recursive: true });
    }
    // Throws when the process lacks write permission
    fs.accessSync(dirPath, fs.constants.W_OK);
  } catch {
    writable = false;
  }
  return writable;
}
|
|
145
|
+
|
|
146
|
+
/**
 * Merge base64 chunks back into a single string
 * @param {Array<string>} chunks - Array of base64 chunks
 * @returns {string} Merged base64 string
 */
function mergeBase64Chunks(chunks) {
  // Simple concatenation in order; an empty array yields ''
  return chunks.reduce((merged, chunk) => merged + chunk, '');
}
|
|
154
|
+
|
|
155
|
+
// Public API of file-utils: format-aware content extraction, multi-part
// file discovery, directory writability checks, and base64 chunk merging.
module.exports = {
  extractContent,
  findMultiPartFiles,
  isDirectoryWritable,
  mergeBase64Chunks,
};
|