@hasindu---7/ff-link-extract 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +58 -0
- package/ff_extract_links.js +301 -0
- package/package.json +23 -0
package/README.md
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
1
|
+
# ff-link-extract
|
|
2
|
+
|
|
3
|
+
CLI to extract direct `https://fuckingfast.co/dl/...` links from fuckingfast page URLs.
|
|
4
|
+
|
|
5
|
+
## Install
|
|
6
|
+
|
|
7
|
+
```bash
|
|
8
|
+
npm install -g @hasindu---7/ff-link-extract
|
|
9
|
+
```
|
|
10
|
+
|
|
11
|
+
## Usage
|
|
12
|
+
|
|
13
|
+
```bash
|
|
14
|
+
ff-link-extract -i urls.txt -o all-direct-links
|
|
15
|
+
```
|
|
16
|
+
|
|
17
|
+
## Input formats supported
|
|
18
|
+
|
|
19
|
+
- One URL per line.
|
|
20
|
+
- Raw HTML (for example copied `<a href="...">` blocks).
|
|
21
|
+
- Mixed text that contains fuckingfast URLs.
|
|
22
|
+
|
|
23
|
+
## Output files
|
|
24
|
+
|
|
25
|
+
With `-o all-direct-links`, it creates:
|
|
26
|
+
|
|
27
|
+
- `all-direct-links.txt` - direct links only (one per line)
|
|
28
|
+
- `all-direct-links.html` - clickable links page
|
|
29
|
+
- `all-direct-links.json` - detailed report with success/failure per source URL
|
|
30
|
+
|
|
31
|
+
## Options
|
|
32
|
+
|
|
33
|
+
- `-i, --input <file>`: input file path (default: `urls.txt`)
|
|
34
|
+
- `-o, --output-prefix <name>`: output file prefix (default: `direct-links`)
|
|
35
|
+
- `-h, --help`: show help
|
|
36
|
+
- `-v, --version`: show version
|
|
37
|
+
|
|
38
|
+
## Local development
|
|
39
|
+
|
|
40
|
+
Run directly without installing globally:
|
|
41
|
+
|
|
42
|
+
```bash
|
|
43
|
+
node ff_extract_links.js -i urls.txt -o all-direct-links
|
|
44
|
+
```
|
|
45
|
+
|
|
46
|
+
Or test as package command locally:
|
|
47
|
+
|
|
48
|
+
```bash
|
|
49
|
+
npm link
|
|
50
|
+
ff-link-extract -i urls.txt -o all-direct-links
|
|
51
|
+
```
|
|
52
|
+
|
|
53
|
+
## Publish to npm
|
|
54
|
+
|
|
55
|
+
1. `npm login`
|
|
56
|
+
2. `npm publish --access public`
|
|
57
|
+
|
|
58
|
+
If the package name is already taken, change `name` in `package.json` first.
|
|
@@ -0,0 +1,301 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
|
|
3
|
+
const fs = require('fs');
|
|
4
|
+
const path = require('path');
|
|
5
|
+
|
|
6
|
+
/**
 * Parse CLI arguments into an options object.
 *
 * Recognized flags: -h/--help, -v/--version, -i/--input <file>,
 * -o/--output-prefix <name>. Unknown arguments are silently ignored;
 * a value flag without a following token is also ignored.
 *
 * @param {string[]} argv - Full process.argv (parsing starts at index 2).
 * @returns {{input: string, outputPrefix: string, help: boolean, version: boolean}}
 */
function parseArgs(argv) {
  const args = { input: 'urls.txt', outputPrefix: 'direct-links', help: false, version: false };

  let idx = 2;
  while (idx < argv.length) {
    const token = argv[idx];
    switch (token) {
      case '-h':
      case '--help':
        args.help = true;
        break;
      case '-v':
      case '--version':
        args.version = true;
        break;
      case '-i':
      case '--input':
        // Only consume the next token when one exists.
        if (argv[idx + 1]) {
          args.input = argv[idx + 1];
          idx += 1;
        }
        break;
      case '-o':
      case '--output-prefix':
        if (argv[idx + 1]) {
          args.outputPrefix = argv[idx + 1];
          idx += 1;
        }
        break;
      default:
        break;
    }
    idx += 1;
  }

  return args;
}
|
|
33
|
+
|
|
34
|
+
/**
 * Read this tool's version from the package.json that sits next to the
 * script.
 *
 * @returns {string} The version string, or 'dev' when package.json is
 *   missing, unreadable, unparsable, or has no version field.
 */
function readPackageVersion() {
  try {
    const manifestPath = path.resolve(__dirname, 'package.json');
    if (!fs.existsSync(manifestPath)) {
      return 'dev';
    }
    const { version } = JSON.parse(fs.readFileSync(manifestPath, 'utf8'));
    return version || 'dev';
  } catch {
    // Any filesystem or JSON error falls back to the dev marker.
    return 'dev';
  }
}
|
|
46
|
+
|
|
47
|
+
/** Print CLI usage information to stdout, one line at a time. */
function printHelp() {
  const helpLines = [
    'ff-link-extract',
    'Extract direct https://fuckingfast.co/dl/... links from fuckingfast page URLs.',
    '',
    'Usage:',
    ' ff-link-extract -i urls.txt -o all-direct-links',
    '',
    'Options:',
    ' -i, --input <file> Input file (plain URLs or raw HTML/text). Default: urls.txt',
    ' -o, --output-prefix <name> Output prefix for .txt/.html/.json files. Default: direct-links',
    ' -h, --help Show this help',
    ' -v, --version Show version',
  ];
  for (const line of helpLines) {
    console.log(line);
  }
}
|
|
60
|
+
|
|
61
|
+
/**
 * Strip a leading UTF-8 BOM and surrounding whitespace from one input line.
 *
 * @param {string} line - Raw line from the input file.
 * @returns {string} Cleaned line (possibly empty).
 */
function sanitizeLine(line) {
  const withoutBom = line.startsWith('\uFEFF') ? line.slice(1) : line;
  return withoutBom.trim();
}
|
|
64
|
+
|
|
65
|
+
/**
 * Clean a URL captured from raw text/HTML.
 *
 * Decodes HTML-encoded ampersands (`&amp;` -> `&`) — raw HTML sources
 * encode the query-string separator — and strips trailing quote/angle
 * characters left over from surrounding markup.
 *
 * @param {string} raw - Candidate URL as matched from the input.
 * @returns {string} Normalized URL (possibly empty).
 */
function normalizeExtractedUrl(raw) {
  return raw
    // Bug fix: the previous no-op replacement of '&' with itself never
    // decoded HTML-escaped ampersands, leaving broken `&amp;` in URLs.
    .replace(/&amp;/gi, '&')
    .replace(/["'<>]+$/g, '')
    .trim();
}
|
|
71
|
+
|
|
72
|
+
/**
 * Find every fuckingfast.co URL in a blob of text or HTML.
 *
 * @param {string} inputText - File contents (URL list, HTML, or mixed text).
 * @returns {string[]} Unique normalized URLs, in first-seen order.
 */
function extractSourceUrls(inputText) {
  const urlPattern = /https?:\/\/fuckingfast\.co\/[^\s"'<>]+/gi;
  // A Set preserves insertion order while removing duplicates.
  const unique = new Set();
  for (const candidate of inputText.match(urlPattern) || []) {
    const normalized = normalizeExtractedUrl(candidate);
    if (normalized) {
      unique.add(normalized);
    }
  }
  return [...unique];
}
|
|
79
|
+
|
|
80
|
+
/**
 * Escape the five HTML-special characters so untrusted text (URLs,
 * filenames) can be embedded safely in HTML text or attribute values.
 *
 * @param {string} str - Untrusted text.
 * @returns {string} HTML-safe text.
 */
function escapeHtml(str) {
  return str
    // Bug fix: each replacement previously mapped a character to itself
    // (e.g. '<' -> '<'), escaping nothing and allowing markup injection
    // into the generated report page.
    .replace(/&/g, '&amp;') // must run first so other entities are not double-escaped
    .replace(/</g, '&lt;')
    .replace(/>/g, '&gt;')
    .replace(/"/g, '&quot;')
    .replace(/'/g, '&#39;');
}
|
|
88
|
+
|
|
89
|
+
/**
 * Derive a human-readable display name for a source URL.
 *
 * Prefers the URL fragment (the site carries filenames there), then the
 * last path segment, and finally the raw URL itself when parsing fails.
 *
 * @param {string} rawUrl - Source page URL.
 * @returns {string} Display name.
 */
function extractFilenameFromUrl(rawUrl) {
  try {
    const parsed = new URL(rawUrl);
    // parsed.hash is '' when absent, or starts with '#'.
    if (parsed.hash.length > 1) {
      return decodeURIComponent(parsed.hash.slice(1));
    }
    const segments = parsed.pathname.split('/').filter(Boolean);
    return segments.length > 0 ? segments[segments.length - 1] : rawUrl;
  } catch {
    // Invalid URL or malformed percent-encoding: fall back to the input.
    return rawUrl;
  }
}
|
|
100
|
+
|
|
101
|
+
/**
 * Pull the direct download URL out of a fetched page's HTML.
 *
 * Tries the `window.open("https://fuckingfast.co/dl/...")` call used on
 * the sample pages first, then falls back to any /dl/ URL appearing
 * anywhere in the source.
 *
 * @param {string} html - Page source.
 * @returns {string|null} Direct download URL, or null when none found.
 */
function extractDirectDownload(html) {
  const patterns = [
    // Primary: the capture group holds the URL.
    /window\.open\(\s*["'](https:\/\/fuckingfast\.co\/dl\/[A-Za-z0-9_\-]+)["']\s*\)/i,
    // Fallback: whole match is the URL.
    /https:\/\/fuckingfast\.co\/dl\/[A-Za-z0-9_\-]+/i,
  ];

  for (const pattern of patterns) {
    const match = html.match(pattern);
    if (match) {
      return match[1] ?? match[0];
    }
  }

  return null;
}
|
|
117
|
+
|
|
118
|
+
/**
 * Download a page and return its HTML body.
 *
 * Sends a desktop-browser user agent and follows redirects.
 *
 * @param {string} url - Page URL to fetch.
 * @returns {Promise<string>} Response body text.
 * @throws {Error} `HTTP <status>` when the response is not ok (non-2xx).
 */
async function fetchHtml(url) {
  const requestOptions = {
    headers: {
      'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36',
      accept: 'text/html,application/xhtml+xml',
    },
    redirect: 'follow',
  };

  const response = await fetch(url, requestOptions);

  if (!response.ok) {
    throw new Error(`HTTP ${response.status}`);
  }

  return response.text();
}
|
|
133
|
+
|
|
134
|
+
/**
 * CLI entry point: read source URLs from the input file, resolve each
 * page to its direct download link, and write three reports
 * (<prefix>.txt, <prefix>.html, <prefix>.json) in the working directory.
 *
 * Exits with code 1 when the input file is missing or contains no URLs.
 */
async function main() {
  const { input, outputPrefix, help, version } = parseArgs(process.argv);

  if (help) {
    printHelp();
    return;
  }

  if (version) {
    console.log(readPackageVersion());
    return;
  }

  // All paths are resolved against the caller's working directory.
  const cwd = process.cwd();
  const inputPath = path.resolve(cwd, input);

  if (!fs.existsSync(inputPath)) {
    console.error(`Input file not found: ${inputPath}`);
    console.error('Create a text file with one fuckingfast page URL per line.');
    process.exit(1);
  }

  const rawInput = fs.readFileSync(inputPath, 'utf8');
  let urls = extractSourceUrls(rawInput);

  // Fallback for old format: one URL per line (with optional comments).
  if (urls.length === 0) {
    const rawLines = rawInput.split(/\r?\n/);
    urls = rawLines.map(sanitizeLine).filter((line) => line && !line.startsWith('#'));
  }

  if (urls.length === 0) {
    console.error('No URLs found in input file.');
    process.exit(1);
  }

  console.log(`Found ${urls.length} source URL(s) in ${path.basename(inputPath)}.`);
  console.log(`Processing ${urls.length} URL(s)...`);

  const results = [];

  // Fetch pages sequentially, recording per-URL success or failure so a
  // single bad URL never aborts the whole run.
  for (let i = 0; i < urls.length; i += 1) {
    const sourceUrl = urls[i];
    const name = extractFilenameFromUrl(sourceUrl);
    process.stdout.write(`[${i + 1}/${urls.length}] ${name} ... `);

    try {
      const html = await fetchHtml(sourceUrl);
      const direct = extractDirectDownload(html);

      if (!direct) {
        results.push({ sourceUrl, name, direct: null, error: 'Download URL not found in HTML' });
        console.log('NOT FOUND');
        continue;
      }

      results.push({ sourceUrl, name, direct, error: null });
      console.log('OK');
    } catch (err) {
      // Network/HTTP errors: record the message and move on.
      results.push({ sourceUrl, name, direct: null, error: err.message || String(err) });
      console.log(`ERROR (${err.message || String(err)})`);
    }
  }

  // Build the plain-text and HTML outputs from the successful rows only;
  // failures are still visible in the JSON report.
  const txtLines = [];
  const htmlRows = [];

  for (const row of results) {
    if (row.direct) {
      txtLines.push(row.direct);
      htmlRows.push(`<li><a href="${escapeHtml(row.direct)}" target="_blank" rel="noopener noreferrer">${escapeHtml(row.name)}</a></li>`);
    }
  }

  const txtOut = path.resolve(cwd, `${outputPrefix}.txt`);
  const htmlOut = path.resolve(cwd, `${outputPrefix}.html`);
  const jsonOut = path.resolve(cwd, `${outputPrefix}.json`);

  // Trailing newline only when there is at least one link.
  fs.writeFileSync(txtOut, txtLines.join('\n') + (txtLines.length ? '\n' : ''), 'utf8');

  // Standalone clickable report page (inline CSS, no external assets).
  const htmlDoc = `<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
<title>FuckingFast Direct Links</title>
<style>
:root {
color-scheme: light;
--bg: #f5f7fb;
--card: #ffffff;
--text: #111827;
--muted: #6b7280;
--accent: #0f766e;
}
body {
margin: 0;
font: 16px/1.5 Segoe UI, Arial, sans-serif;
color: var(--text);
background: radial-gradient(circle at top right, #dbeafe, transparent 40%), var(--bg);
}
.wrap {
max-width: 900px;
margin: 2rem auto;
padding: 1rem;
}
.card {
background: var(--card);
border-radius: 14px;
box-shadow: 0 10px 24px rgba(15, 23, 42, 0.08);
padding: 1.25rem;
}
h1 {
margin: 0 0 0.5rem;
font-size: 1.25rem;
}
p {
color: var(--muted);
margin-top: 0;
}
ul {
margin: 0;
padding-left: 1.2rem;
}
li {
margin: 0.45rem 0;
word-break: break-all;
}
a {
color: var(--accent);
text-decoration: none;
}
a:hover {
text-decoration: underline;
}
</style>
</head>
<body>
<main class="wrap">
<section class="card">
<h1>Extracted Direct Links (${txtLines.length}/${results.length})</h1>
<p>Open links in a new tab. Failed extractions are listed in the JSON report.</p>
<ul>
${htmlRows.join('\n ')}
</ul>
</section>
</main>
</body>
</html>
`;

  fs.writeFileSync(htmlOut, htmlDoc, 'utf8');
  fs.writeFileSync(jsonOut, JSON.stringify(results, null, 2) + '\n', 'utf8');

  const okCount = results.filter((r) => r.direct).length;
  const failCount = results.length - okCount;

  console.log('');
  console.log(`Done. Success: ${okCount}, Failed: ${failCount}`);
  console.log(`Text links: ${txtOut}`);
  console.log(`Clickable HTML: ${htmlOut}`);
  console.log(`Full report: ${jsonOut}`);
}
|
|
297
|
+
|
|
298
|
+
// Entry point: surface any unhandled failure and exit non-zero.
main().catch((error) => {
  console.error(error);
  process.exit(1);
});
|
package/package.json
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@hasindu---7/ff-link-extract",
|
|
3
|
+
"version": "1.0.0",
|
|
4
|
+
"description": "Extract direct fuckingfast download links from page URLs",
|
|
5
|
+
"bin": {
|
|
6
|
+
"ff-link-extract": "ff_extract_links.js"
|
|
7
|
+
},
|
|
8
|
+
"type": "commonjs",
|
|
9
|
+
"files": [
|
|
10
|
+
"ff_extract_links.js",
|
|
11
|
+
"README.md"
|
|
12
|
+
],
|
|
13
|
+
"keywords": [
|
|
14
|
+
"fuckingfast",
|
|
15
|
+
"link-extractor",
|
|
16
|
+
"cli",
|
|
17
|
+
"download"
|
|
18
|
+
],
|
|
19
|
+
"license": "MIT",
|
|
20
|
+
"engines": {
|
|
21
|
+
"node": ">=18"
|
|
22
|
+
}
|
|
23
|
+
}
|