ezshare-cli 1.0.0
- package/LICENSE +21 -0
- package/README.md +311 -0
- package/bin/ezshare.js +2 -0
- package/dist/cli.js +60 -0
- package/dist/commands/receive.js +129 -0
- package/dist/commands/send.js +117 -0
- package/dist/components/FileBrowser.js +75 -0
- package/dist/components/HelpScreen.js +10 -0
- package/dist/components/MainMenu.js +9 -0
- package/dist/components/Shell.js +88 -0
- package/dist/components/TransferUI.js +6 -0
- package/dist/utils/compression.js +354 -0
- package/dist/utils/crypto.js +236 -0
- package/dist/utils/fileSystem.js +89 -0
- package/dist/utils/network.js +98 -0
- package/dist/utils/tar.js +169 -0
- package/package.json +56 -0
package/dist/utils/crypto.js
ADDED

@@ -0,0 +1,236 @@
/**
 * Crypto utilities for HyperStream
 *
 * Uses AES-256-GCM with chunked encryption for true streaming support.
 * Each chunk is independently authenticated, allowing fail-fast on corruption.
 *
 * Stream format:
 *   [4 bytes: nonce prefix]  <- sent once at start
 *   [chunks...]
 *   [4 bytes: 0x00000000]    <- end marker
 *
 * Chunk format:
 *   [4 bytes: plaintext length, big-endian]
 *   [ciphertext]
 *   [16 bytes: GCM auth tag]
 *
 * Nonce construction for chunk N:
 *   [4-byte random prefix][8-byte big-endian counter N]
 *
 * This ensures unique nonces across all chunks and sessions.
 */
import { Transform } from 'node:stream';
import { createCipheriv, createDecipheriv, randomBytes, hkdfSync } from 'node:crypto';

// Constants
const ALGORITHM = 'aes-256-gcm';
const CHUNK_SIZE = 64 * 1024; // 64KB chunks - good balance of overhead vs latency
const NONCE_PREFIX_SIZE = 4;
const NONCE_SIZE = 12;  // 4 prefix + 8 counter (standard GCM nonce)
const TAG_SIZE = 16;    // GCM authentication tag
const LENGTH_SIZE = 4;  // uint32 for chunk length
const KEY_SIZE = 32;    // AES-256

// HKDF parameters for key derivation
const HKDF_SALT = 'hyperstream-v1';
const HKDF_INFO = 'aes-256-gcm';

/**
 * Derive AES-256 key from topic using HKDF-SHA256
 *
 * Never use the topic directly as a key - always derive through HKDF
 * for proper cryptographic key derivation with domain separation.
 */
export function deriveKey(topic) {
    if (topic.length !== KEY_SIZE) {
        throw new Error(`Topic must be ${KEY_SIZE} bytes, got ${topic.length}`);
    }
    return Buffer.from(hkdfSync('sha256', topic, HKDF_SALT, HKDF_INFO, KEY_SIZE));
}

/**
 * Generate a random topic key for sharing
 *
 * @returns Object with raw topic buffer and display-friendly base64url string
 */
export function generateTopicKey() {
    const topic = randomBytes(KEY_SIZE);
    const displayKey = topic.toString('base64url');
    return { topic, displayKey };
}

/**
 * Parse a display key back to a topic buffer
 *
 * @param displayKey - base64url encoded topic key
 * @throws Error if key is invalid length
 */
export function parseTopicKey(displayKey) {
    const topic = Buffer.from(displayKey, 'base64url');
    if (topic.length !== KEY_SIZE) {
        throw new Error(`Invalid key: expected ${KEY_SIZE} bytes, got ${topic.length}`);
    }
    return topic;
}

/**
 * Construct nonce from prefix and counter
 */
function makeNonce(prefix, counter) {
    const nonce = Buffer.alloc(NONCE_SIZE);
    prefix.copy(nonce, 0, 0, NONCE_PREFIX_SIZE);
    nonce.writeBigUInt64BE(counter, NONCE_PREFIX_SIZE);
    return nonce;
}

/**
 * Encrypt a single chunk with AES-256-GCM
 */
function encryptChunk(key, noncePrefix, counter, plaintext) {
    const nonce = makeNonce(noncePrefix, counter);
    const cipher = createCipheriv(ALGORITHM, key, nonce);
    const ciphertext = Buffer.concat([
        cipher.update(plaintext),
        cipher.final(),
    ]);
    const tag = cipher.getAuthTag();
    // Output format: [length][ciphertext][tag]
    const output = Buffer.alloc(LENGTH_SIZE + ciphertext.length + TAG_SIZE);
    output.writeUInt32BE(plaintext.length, 0);
    ciphertext.copy(output, LENGTH_SIZE);
    tag.copy(output, LENGTH_SIZE + ciphertext.length);
    return output;
}

/**
 * Decrypt a single chunk with AES-256-GCM
 *
 * @throws Error if authentication fails (wrong key or corrupted data)
 */
function decryptChunk(key, noncePrefix, counter, ciphertext, tag) {
    const nonce = makeNonce(noncePrefix, counter);
    const decipher = createDecipheriv(ALGORITHM, key, nonce);
    decipher.setAuthTag(tag);
    return Buffer.concat([decipher.update(ciphertext), decipher.final()]);
}

/**
 * Create an encrypting transform stream
 *
 * Buffers input data and encrypts in CHUNK_SIZE chunks.
 * Outputs:
 *   1. Nonce prefix (4 bytes) - sent first
 *   2. Encrypted chunks
 *   3. End marker (4 zero bytes) - sent last
 *
 * @param key - 32-byte AES key (use deriveKey to get this from topic)
 */
export function createEncryptStream(key) {
    const noncePrefix = randomBytes(NONCE_PREFIX_SIZE);
    let counter = 0n;
    let buffer = Buffer.alloc(0);
    let headerSent = false;
    return new Transform({
        transform(chunk, _encoding, callback) {
            try {
                // Send nonce prefix header on first data
                if (!headerSent) {
                    this.push(noncePrefix);
                    headerSent = true;
                }
                buffer = Buffer.concat([buffer, chunk]);
                // Process complete chunks
                while (buffer.length >= CHUNK_SIZE) {
                    const plaintext = buffer.subarray(0, CHUNK_SIZE);
                    buffer = buffer.subarray(CHUNK_SIZE);
                    const encrypted = encryptChunk(key, noncePrefix, counter, plaintext);
                    counter++;
                    this.push(encrypted);
                }
                callback();
            }
            catch (err) {
                callback(err instanceof Error ? err : new Error(String(err)));
            }
        },
        flush(callback) {
            try {
                // Encrypt any remaining buffered data
                if (buffer.length > 0) {
                    const encrypted = encryptChunk(key, noncePrefix, counter, buffer);
                    this.push(encrypted);
                }
                // Send end marker (length = 0)
                const endMarker = Buffer.alloc(LENGTH_SIZE, 0);
                this.push(endMarker);
                callback();
            }
            catch (err) {
                callback(err instanceof Error ? err : new Error(String(err)));
            }
        },
    });
}

/**
 * Create a decrypting transform stream
 *
 * Parses and decrypts the encrypted stream format.
 * Expects:
 *   1. Nonce prefix (4 bytes)
 *   2. Encrypted chunks
 *   3. End marker (4 zero bytes)
 *
 * @param key - 32-byte AES key (use deriveKey to get this from topic)
 * @throws Propagates authentication errors if data is corrupted
 */
export function createDecryptStream(key) {
    let noncePrefix = null;
    let counter = 0n;
    let buffer = Buffer.alloc(0);
    let chunkLength = null;
    let ended = false;
    return new Transform({
        transform(chunk, _encoding, callback) {
            if (ended) {
                return callback();
            }
            buffer = Buffer.concat([buffer, chunk]);
            try {
                // Phase 1: Read nonce prefix (once)
                if (noncePrefix === null) {
                    if (buffer.length < NONCE_PREFIX_SIZE) {
                        return callback(); // Need more data
                    }
                    noncePrefix = Buffer.from(buffer.subarray(0, NONCE_PREFIX_SIZE));
                    buffer = buffer.subarray(NONCE_PREFIX_SIZE);
                }
                // Phase 2: Process chunks
                while (!ended) {
                    // Read chunk length if not yet known
                    if (chunkLength === null) {
                        if (buffer.length < LENGTH_SIZE) {
                            return callback(); // Need more data
                        }
                        chunkLength = buffer.readUInt32BE(0);
                        buffer = buffer.subarray(LENGTH_SIZE);
                        // Check for end marker
                        if (chunkLength === 0) {
                            ended = true;
                            return callback();
                        }
                    }
                    // Wait for complete chunk (ciphertext + tag)
                    const neededBytes = chunkLength + TAG_SIZE;
                    if (buffer.length < neededBytes) {
                        return callback(); // Need more data
                    }
                    // Extract ciphertext and tag
                    const ciphertext = buffer.subarray(0, chunkLength);
                    const tag = buffer.subarray(chunkLength, neededBytes);
                    buffer = buffer.subarray(neededBytes);
                    // Decrypt and output
                    const plaintext = decryptChunk(key, noncePrefix, counter, ciphertext, tag);
                    counter++;
                    chunkLength = null;
                    this.push(plaintext);
                }
                callback();
            }
            catch (err) {
                // Authentication failures will be caught here
                callback(err instanceof Error ? err : new Error(String(err)));
            }
        },
    });
}
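A quick way to see the framing above in action is to pipe a buffer through the encrypt and decrypt transforms back to back. This is a sketch of mine, not code shipped in the package, and the './crypto.js' import path is an assumption:

import { Readable } from 'node:stream';
import { pipeline } from 'node:stream/promises';
import { generateTopicKey, deriveKey, createEncryptStream, createDecryptStream } from './crypto.js'; // assumed path

const { topic, displayKey } = generateTopicKey(); // displayKey is the shareable base64url form
const key = deriveKey(topic);                     // HKDF step - never use the topic directly

const chunks = [];
await pipeline(
    Readable.from([Buffer.from('hello, hyperstream')]),
    createEncryptStream(key),  // emits [nonce prefix][length|ciphertext|tag]...[end marker]
    createDecryptStream(key),  // parses that framing back into plaintext
    async (source) => {
        for await (const chunk of source) chunks.push(chunk);
    },
);
console.log(Buffer.concat(chunks).toString()); // 'hello, hyperstream'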
package/dist/utils/fileSystem.js
ADDED

@@ -0,0 +1,89 @@
import { readdir, stat } from 'node:fs/promises';
import { join } from 'node:path';

/**
 * Read a directory and return sorted file entries
 * @param dirPath - Path to directory to read
 * @param showHidden - Whether to include hidden files (default: false)
 * @returns Array of file entries sorted (directories first, then alphabetically)
 */
export async function readDirectory(dirPath, showHidden = false) {
    try {
        const entries = await readdir(dirPath, { withFileTypes: true });
        const fileEntries = [];
        for (const entry of entries) {
            const name = entry.name;
            const isHidden = name.startsWith('.');
            // Skip hidden files unless showHidden is true
            if (isHidden && !showHidden) {
                continue;
            }
            const fullPath = join(dirPath, name);
            let size;
            // Get size for files
            if (entry.isFile()) {
                try {
                    const stats = await stat(fullPath);
                    size = stats.size;
                }
                catch {
                    // If we can't stat the file, skip the size
                    size = undefined;
                }
            }
            fileEntries.push({
                name,
                path: fullPath,
                isDirectory: entry.isDirectory(),
                size,
                isHidden,
            });
        }
        // Sort: directories first, then files, both alphabetically
        fileEntries.sort((a, b) => {
            // Directories come before files
            if (a.isDirectory && !b.isDirectory)
                return -1;
            if (!a.isDirectory && b.isDirectory)
                return 1;
            // Within same type, sort alphabetically (case-insensitive)
            return a.name.toLowerCase().localeCompare(b.name.toLowerCase());
        });
        return fileEntries;
    }
    catch (error) {
        // If directory can't be read, return empty array
        console.error(`Error reading directory ${dirPath}:`, error);
        return [];
    }
}

/**
 * Get information about a specific file or directory
 * @param filePath - Path to file or directory
 * @returns FileEntry with file information
 */
export async function getFileInfo(filePath) {
    const stats = await stat(filePath);
    const name = filePath.split('/').pop() || filePath;
    return {
        name,
        path: filePath,
        isDirectory: stats.isDirectory(),
        size: stats.isFile() ? stats.size : undefined,
        isHidden: name.startsWith('.'),
    };
}

/**
 * Format bytes to human-readable file size
 * @param bytes - Size in bytes
 * @returns Formatted string like "1.5 MB", "500 KB", etc.
 */
export function formatFileSize(bytes) {
    if (bytes === 0)
        return '0 B';
    const units = ['B', 'KB', 'MB', 'GB', 'TB'];
    const k = 1024;
    const i = Math.floor(Math.log(bytes) / Math.log(k));
    // Limit to 2 decimal places and remove trailing zeros
    const value = parseFloat((bytes / Math.pow(k, i)).toFixed(2));
    return `${value} ${units[i]}`;
}
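Two illustrative notes on these helpers (my examples, not from the package): formatFileSize divides by powers of 1024 and trims trailing zeros, and readDirectory returns directories ahead of files. Import path is assumed:

import { readDirectory, formatFileSize } from './fileSystem.js'; // assumed path

formatFileSize(0);          // '0 B'
formatFileSize(1536);       // '1.5 KB' (1536 / 1024, trailing zero trimmed)
formatFileSize(1048576);    // '1 MB'
formatFileSize(1234567890); // '1.15 GB' (capped at 2 decimal places)

const entries = await readDirectory(process.cwd()); // directories first, then files
for (const e of entries) {
    console.log(e.isDirectory ? `${e.name}/` : `${e.name}  ${formatFileSize(e.size ?? 0)}`);
}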
package/dist/utils/network.js
ADDED

@@ -0,0 +1,98 @@
import Hyperswarm from 'hyperswarm';
import { appendFileSync } from 'node:fs';

const CONNECTION_TIMEOUT = 30000; // 30 seconds
const LOG_FILE = '/tmp/ezshare_debug.log';

function debugLog(message) {
    const timestamp = new Date().toISOString();
    const logLine = `[${timestamp}] ${message}\n`;
    try {
        appendFileSync(LOG_FILE, logLine);
    }
    catch (e) {
        // Ignore file write errors
    }
    console.error(message); // Use stderr to avoid Ink interference
}

/**
 * Create a sender swarm that announces to the DHT and waits for a peer connection
 * @param topic - The topic buffer to join (32 bytes)
 * @returns Swarm instance and a function to wait for the first peer connection
 */
export async function createSenderSwarm(topic) {
    const swarm = new Hyperswarm();
    debugLog('[Sender] Creating sender swarm...');
    debugLog('[Sender] Topic hash: ' + topic.toString('hex'));
    debugLog('[Sender] Topic base64url: ' + topic.toString('base64url'));
    // Enable both server and client for better NAT traversal
    const discovery = swarm.join(topic, { server: true, client: true });
    debugLog('[Sender] Announcing to DHT...');
    // Wait for the topic to be fully announced to the DHT
    await discovery.flushed();
    debugLog('[Sender] DHT announcement complete, waiting for peer...');
    // Create a promise that resolves when a peer connects
    const waitForPeer = () => {
        return new Promise((resolve, reject) => {
            // Set timeout for connection
            const timeout = setTimeout(() => {
                reject(new Error(`Connection timeout after ${CONNECTION_TIMEOUT / 1000}s. ` +
                    'Peer may not be online or DHT discovery failed. ' +
                    'Ensure both peers are started within a few seconds of each other.'));
            }, CONNECTION_TIMEOUT);
            swarm.once('connection', (socket) => {
                clearTimeout(timeout);
                debugLog('[Sender] Peer connected!');
                resolve(socket);
            });
            // Debug: log peer discovery
            swarm.on('peer-add', () => {
                debugLog('[Sender] Peer discovered via DHT');
            });
        });
    };
    return { swarm, waitForPeer };
}

/**
 * Create a receiver swarm that connects to a sender
 * @param topic - The topic buffer to join (32 bytes)
 * @returns Swarm instance and a promise that resolves with the first peer connection
 */
export async function createReceiverSwarm(topic) {
    const swarm = new Hyperswarm();
    debugLog('[Receiver] Creating receiver swarm...');
    debugLog('[Receiver] Topic hash: ' + topic.toString('hex'));
    debugLog('[Receiver] Topic base64url: ' + topic.toString('base64url'));
    // CRITICAL: Register the connection listener BEFORE joining/flushing,
    // otherwise we can miss the connection event (race condition)
    const connectionPromise = new Promise((resolve, reject) => {
        // Set timeout for connection
        const timeout = setTimeout(() => {
            reject(new Error(`Connection timeout after ${CONNECTION_TIMEOUT / 1000}s. ` +
                'Could not find or connect to sender. ' +
                'Ensure sender is running and you entered the correct share key.'));
        }, CONNECTION_TIMEOUT);
        swarm.once('connection', (socket) => {
            clearTimeout(timeout);
            debugLog('[Receiver] Connected to peer!');
            resolve(socket);
        });
        // Debug: log peer discovery
        swarm.on('peer-add', () => {
            debugLog('[Receiver] Peer discovered via DHT, connecting...');
        });
    });
    // Enable both server and client for better NAT traversal
    swarm.join(topic, { server: true, client: true });
    debugLog('[Receiver] Looking up peers in DHT...');
    // Flush the swarm to start connecting
    await swarm.flush();
    debugLog('[Receiver] DHT lookup complete, waiting for connection...');
    return { swarm, connectionPromise };
}

/**
 * Clean up and destroy a swarm instance
 * @param swarm - The Hyperswarm instance to clean up
 */
export async function cleanupSwarm(swarm) {
    // Destroy the swarm and close all connections
    await swarm.destroy();
}
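The intended pairing, as far as these exports show, is one side announcing and one side looking up the same 32-byte topic. A sketch of both halves (run as separate processes in practice; import paths assumed, and this is not the package's own send/receive command code):

import { generateTopicKey, parseTopicKey } from './crypto.js';   // assumed path
import { createSenderSwarm, createReceiverSwarm, cleanupSwarm } from './network.js'; // assumed path

// --- sender process ---
const { topic, displayKey } = generateTopicKey();
console.log('Share this key:', displayKey);
const { swarm, waitForPeer } = await createSenderSwarm(topic); // resolves after the DHT announce
const socket = await waitForPeer(); // rejects after CONNECTION_TIMEOUT if nobody connects
// ... pipe data over `socket` here ...
await cleanupSwarm(swarm);

// --- receiver process (displayKey arrives out of band) ---
const { swarm: rxSwarm, connectionPromise } = await createReceiverSwarm(parseTopicKey(displayKey));
const rxSocket = await connectionPromise; // listener was registered before join/flush
// ... consume data from `rxSocket` here ...
await cleanupSwarm(rxSwarm);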
package/dist/utils/tar.js
ADDED

@@ -0,0 +1,169 @@
import { createReadStream, createWriteStream } from 'node:fs';
import { readdir, stat, mkdir } from 'node:fs/promises';
import { join, dirname, relative } from 'node:path';
import tarStream from 'tar-stream';

/**
 * Get metadata about the transfer source (file or directory)
 * @param sourcePath - Path to file or directory
 * @returns Metadata including total size, file count, and whether it's a directory
 */
export async function getTransferMetadata(sourcePath) {
    const stats = await stat(sourcePath);
    if (!stats.isDirectory()) {
        // Single file
        return {
            totalSize: stats.size,
            fileCount: 1,
            isDirectory: false,
        };
    }
    // Directory - walk recursively to count files and total size
    let totalSize = 0;
    let fileCount = 0;
    async function walk(currentPath) {
        const entries = await readdir(currentPath, { withFileTypes: true });
        for (const entry of entries) {
            const fullPath = join(currentPath, entry.name);
            if (entry.isDirectory()) {
                await walk(fullPath);
            }
            else if (entry.isFile()) {
                const fileStats = await stat(fullPath);
                totalSize += fileStats.size;
                fileCount++;
            }
        }
    }
    await walk(sourcePath);
    return {
        totalSize,
        fileCount,
        isDirectory: true,
    };
}

/**
 * Create a readable stream that packs a file or directory into tar format
 * @param sourcePath - Path to file or directory to pack
 * @returns Readable stream of tar data
 */
export function createPackStream(sourcePath) {
    const pack = tarStream.pack();
    // Start packing asynchronously
    (async () => {
        try {
            const stats = await stat(sourcePath);
            if (!stats.isDirectory()) {
                // Single file - add it directly
                const relativeName = sourcePath.split('/').pop() || 'file';
                const fileStream = createReadStream(sourcePath);
                const entry = pack.entry({
                    name: relativeName,
                    size: stats.size,
                    mode: stats.mode,
                    mtime: stats.mtime,
                });
                fileStream.pipe(entry);
                await new Promise((resolve, reject) => {
                    entry.on('finish', resolve);
                    entry.on('error', reject);
                    fileStream.on('error', reject);
                });
                pack.finalize();
                return;
            }
            // Directory - walk recursively and add all files and directories
            async function addDirectory(currentPath, basePath) {
                const entries = await readdir(currentPath, { withFileTypes: true });
                for (const entry of entries) {
                    const fullPath = join(currentPath, entry.name);
                    const relativePath = relative(basePath, fullPath);
                    if (entry.isDirectory()) {
                        // Add directory entry
                        const dirStats = await stat(fullPath);
                        await new Promise((resolve, reject) => {
                            pack.entry({
                                name: relativePath + '/',
                                type: 'directory',
                                mode: dirStats.mode,
                                mtime: dirStats.mtime,
                            }, (err) => {
                                if (err)
                                    reject(err);
                                else
                                    resolve();
                            });
                        });
                        // Recursively add subdirectory contents
                        await addDirectory(fullPath, basePath);
                    }
                    else if (entry.isFile()) {
                        // Add file entry
                        const fileStats = await stat(fullPath);
                        const fileStream = createReadStream(fullPath);
                        const tarEntry = pack.entry({
                            name: relativePath,
                            size: fileStats.size,
                            mode: fileStats.mode,
                            mtime: fileStats.mtime,
                        });
                        fileStream.pipe(tarEntry);
                        await new Promise((resolve, reject) => {
                            tarEntry.on('finish', resolve);
                            tarEntry.on('error', reject);
                            fileStream.on('error', reject);
                        });
                    }
                }
            }
            await addDirectory(sourcePath, dirname(sourcePath));
            pack.finalize();
        }
        catch (error) {
            pack.destroy(error);
        }
    })();
    return pack;
}

/**
 * Create a writable stream that extracts tar data to a destination directory
 * @param destPath - Destination directory for extracted files
 * @returns Writable stream that accepts tar data
 */
export function createExtractStream(destPath) {
    const extract = tarStream.extract();
    extract.on('entry', (header, stream, next) => {
        const outputPath = join(destPath, header.name);
        // Ensure parent directory exists
        (async () => {
            try {
                const parentDir = dirname(outputPath);
                await mkdir(parentDir, { recursive: true });
                if (header.type === 'file') {
                    // Write file
                    const writeStream = createWriteStream(outputPath, {
                        mode: header.mode,
                    });
                    stream.pipe(writeStream);
                    await new Promise((resolve, reject) => {
                        writeStream.on('finish', resolve);
                        writeStream.on('error', reject);
                        stream.on('error', reject);
                    });
                }
                else if (header.type === 'directory') {
                    // Create directory
                    await mkdir(outputPath, { recursive: true, mode: header.mode });
                    stream.resume(); // Drain the stream
                }
                else {
                    stream.resume(); // Skip unsupported types
                }
                next();
            }
            catch (error) {
                next(error);
            }
        })();
    });
    return extract;
}
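These two ends compose into a plain stream pipeline. A local round-trip sketch with hypothetical paths (in the package itself the tar stream presumably travels through compression, encryption, and the swarm socket in between):

import { pipeline } from 'node:stream/promises';
import { getTransferMetadata, createPackStream, createExtractStream } from './tar.js'; // assumed path

const meta = await getTransferMetadata('./photos'); // hypothetical source directory
console.log(`${meta.fileCount} file(s), ${meta.totalSize} bytes to send`);

await pipeline(createPackStream('./photos'), createExtractStream('./restored'));
// ./restored/photos/... now mirrors the source, with file modes preserved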
package/package.json
ADDED
@@ -0,0 +1,56 @@
{
  "name": "ezshare-cli",
  "version": "1.0.0",
  "description": "Secure P2P file transfer CLI with end-to-end encryption - No servers, no signups, just share",
  "type": "module",
  "bin": {
    "ezshare": "bin/ezshare.js"
  },
  "scripts": {
    "build": "tsc",
    "dev": "tsx src/cli.tsx",
    "test": "tsx --test src/**/*.test.ts",
    "prepublishOnly": "npm run build"
  },
  "keywords": [
    "p2p",
    "file-transfer",
    "file-sharing",
    "hyperswarm",
    "cli",
    "decentralized",
    "encrypted",
    "dht",
    "peer-to-peer",
    "secure-transfer",
    "no-server"
  ],
  "author": "Blake",
  "license": "MIT",
  "repository": {
    "type": "git",
    "url": "git+https://github.com/yulchanshin/ezsharecli.git"
  },
  "bugs": {
    "url": "https://github.com/yulchanshin/ezsharecli/issues"
  },
  "homepage": "https://github.com/yulchanshin/ezsharecli#readme",
  "engines": {
    "node": ">=18.0.0"
  },
  "dependencies": {
    "@inkjs/ui": "^2.0.0",
    "hyperswarm": "^4.16.0",
    "ink": "^6.6.0",
    "meow": "^14.0.0",
    "pump": "^3.0.3",
    "simple-zstd": "^2.0.0",
    "tar-stream": "^3.1.7"
  },
  "devDependencies": {
    "@types/node": "^25.0.5",
    "@types/react": "^19.2.7",
    "tsx": "^4.21.0",
    "typescript": "^5.9.3"
  }
}