stealth-fetch-plus 0.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +182 -0
- package/dist/client.d.ts +94 -0
- package/dist/client.js +969 -0
- package/dist/compat/web.d.ts +15 -0
- package/dist/compat/web.js +31 -0
- package/dist/connection-pool.d.ts +39 -0
- package/dist/connection-pool.js +84 -0
- package/dist/dns-cache.d.ts +25 -0
- package/dist/dns-cache.js +44 -0
- package/dist/http1/chunked.d.ts +35 -0
- package/dist/http1/chunked.js +87 -0
- package/dist/http1/client.d.ts +28 -0
- package/dist/http1/client.js +289 -0
- package/dist/http1/parser.d.ts +29 -0
- package/dist/http1/parser.js +78 -0
- package/dist/http2/client.d.ts +64 -0
- package/dist/http2/client.js +97 -0
- package/dist/http2/connection.d.ts +125 -0
- package/dist/http2/connection.js +666 -0
- package/dist/http2/constants.d.ts +72 -0
- package/dist/http2/constants.js +74 -0
- package/dist/http2/flow-control.d.ts +32 -0
- package/dist/http2/flow-control.js +76 -0
- package/dist/http2/framer.d.ts +47 -0
- package/dist/http2/framer.js +133 -0
- package/dist/http2/hpack.d.ts +54 -0
- package/dist/http2/hpack.js +186 -0
- package/dist/http2/parser.d.ts +35 -0
- package/dist/http2/parser.js +72 -0
- package/dist/http2/stream.d.ts +72 -0
- package/dist/http2/stream.js +252 -0
- package/dist/index.d.ts +18 -0
- package/dist/index.js +33 -0
- package/dist/protocol-cache.d.ts +14 -0
- package/dist/protocol-cache.js +29 -0
- package/dist/socket/adapter.d.ts +59 -0
- package/dist/socket/adapter.js +145 -0
- package/dist/socket/nat64.d.ts +69 -0
- package/dist/socket/nat64.js +196 -0
- package/dist/socket/tls.d.ts +28 -0
- package/dist/socket/tls.js +33 -0
- package/dist/socket/wasm-pkg/wasm_tls.d.ts +107 -0
- package/dist/socket/wasm-pkg/wasm_tls.js +568 -0
- package/dist/socket/wasm-pkg/wasm_tls_bg.wasm +0 -0
- package/dist/socket/wasm-pkg/wasm_tls_bg.wasm.d.ts +20 -0
- package/dist/socket/wasm-tls-adapter.d.ts +39 -0
- package/dist/socket/wasm-tls-adapter.js +97 -0
- package/dist/socket/wasm-tls-bridge.d.ts +30 -0
- package/dist/socket/wasm-tls-bridge.js +160 -0
- package/dist/utils/headers.d.ts +21 -0
- package/dist/utils/headers.js +36 -0
- package/dist/utils/url.d.ts +16 -0
- package/dist/utils/url.js +12 -0
- package/package.json +87 -0
+++ package/dist/compat/web.d.ts
@@ -0,0 +1,15 @@
import { HttpResponse } from '../client.js';

interface WebResponsePair {
  response: Response;
  clone: Response;
}
/**
 * Convert a stealth-fetch HttpResponse into a standard Web Response.
 * Note: Do not call HttpResponse.text/json/arrayBuffer before converting.
 */
declare function toWebResponse(response: HttpResponse, options?: {
  tee?: boolean;
}): Response | WebResponsePair;

export { type WebResponsePair, toWebResponse };
+++ package/dist/compat/web.js
@@ -0,0 +1,31 @@
function toWebResponse(response, options) {
  if (response.body.locked) {
    throw new Error("Cannot convert to Web Response: body stream is already locked/consumed");
  }
  const headers = new Headers();
  if (response.rawHeaders && response.rawHeaders.length > 0) {
    for (const [name, value] of response.rawHeaders) {
      headers.append(name, value);
    }
  } else {
    for (const [name, value] of Object.entries(response.headers)) {
      headers.set(name, value);
    }
  }
  const init = {
    status: response.status,
    statusText: response.statusText,
    headers
  };
  if (options?.tee) {
    const [a, b] = response.body.tee();
    return {
      response: new Response(a, init),
      clone: new Response(b, init)
    };
  }
  return new Response(response.body, init);
}
export {
  toWebResponse
};
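Taken together with the declaration file above, the intended call pattern looks roughly like the sketch below. The deep `dist/` import specifiers and the `fetchRaw` helper are assumptions for illustration; only `toWebResponse`, its `tee` option, and the locked-body guard are confirmed by this diff.

```ts
import { toWebResponse, type WebResponsePair } from "stealth-fetch-plus/dist/compat/web.js";
import type { HttpResponse } from "stealth-fetch-plus/dist/client.js";

// Hypothetical producer of an HttpResponse (the real one lives in dist/client.js).
declare function fetchRaw(url: string): Promise<HttpResponse>;

const raw = await fetchRaw("https://example.com/data.json");

// tee: true splits the body into two Web Responses, e.g. one to return to the
// caller and one to write into a cache. Without tee, a single Response is
// returned and the original body stream is handed over as-is, which is why the
// HttpResponse must not have been consumed (locked) beforehand.
const { response, clone } = toWebResponse(raw, { tee: true }) as WebResponsePair;
console.log(await response.json());
void clone; // e.g. caches.default.put(url, clone) on Cloudflare Workers
```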
+++ package/dist/connection-pool.d.ts
@@ -0,0 +1,39 @@
import { Http2Client } from './http2/client.js';
import './utils/url.js';
import './http2/connection.js';
import 'node:buffer';
import 'node:events';
import 'node:stream';
import './http2/stream.js';
import './http2/constants.js';
import './http2/flow-control.js';

/**
 * V8 isolate-level HTTP/2 connection pool.
 * Reuses H2 connections to the same origin, leveraging HTTP/2 stream
 * multiplexing to avoid redundant TCP + TLS + SETTINGS handshakes.
 *
 * Pool lives in the global scope of the V8 isolate.
 * CF Workers may reuse isolates across requests, so pooled connections
 * persist for the isolate's lifetime (subject to idle timeout).
 */

/**
 * Get a pooled H2 client for the given origin.
 * Returns null if no usable connection exists.
 */
declare function getPooledClient(hostname: string, port: number, connectHostname?: string): Http2Client | null;
/**
 * Add an H2 client to the pool for reuse.
 */
declare function poolClient(hostname: string, port: number, client: Http2Client, connectHostname?: string): void;
/**
 * Remove a client from the pool (e.g. on GOAWAY or error).
 */
declare function removePooled(hostname: string, port: number, connectHostname?: string): void;
/**
 * Clear all pooled connections. Useful for testing.
 */
declare function clearPool(): void;

export { clearPool, getPooledClient, poolClient, removePooled };
+++ package/dist/connection-pool.js
@@ -0,0 +1,84 @@
const POOL_TTL = 6e4;
const MAX_POOL_SIZE = 20;
const pool = /* @__PURE__ */ new Map();
const goawayRegistered = /* @__PURE__ */ new WeakSet();
function makeKey(hostname, port, connectHostname) {
  return connectHostname ? `${hostname}:${port}@${connectHostname}` : `${hostname}:${port}`;
}
function getPooledClient(hostname, port, connectHostname) {
  const key = makeKey(hostname, port, connectHostname);
  const entry = pool.get(key);
  if (!entry) return null;
  if (Date.now() - entry.lastUsedAt > POOL_TTL) {
    pool.delete(key);
    entry.client.close().catch(() => {
    });
    return null;
  }
  if (!entry.client.hasCapacity) {
    pool.delete(key);
    entry.client.close().catch(() => {
    });
    return null;
  }
  entry.lastUsedAt = Date.now();
  return entry.client;
}
function poolClient(hostname, port, client, connectHostname) {
  const key = makeKey(hostname, port, connectHostname);
  if (pool.size >= MAX_POOL_SIZE && !pool.has(key)) {
    let oldestKey = null;
    let oldestTime = Infinity;
    for (const [k, v] of pool) {
      if (v.lastUsedAt < oldestTime) {
        oldestTime = v.lastUsedAt;
        oldestKey = k;
      }
    }
    if (oldestKey) {
      const evicted = pool.get(oldestKey);
      pool.delete(oldestKey);
      evicted?.client.close().catch(() => {
      });
    }
  }
  const existing = pool.get(key);
  if (existing && existing.client !== client) {
    existing.client.close().catch(() => {
    });
  }
  if (!goawayRegistered.has(client)) {
    goawayRegistered.add(client);
    client.onGoaway(() => {
      const entry = pool.get(key);
      if (entry?.client === client) {
        pool.delete(key);
        client.close().catch(() => {
        });
      }
    });
  }
  pool.set(key, { client, lastUsedAt: Date.now() });
}
function removePooled(hostname, port, connectHostname) {
  const key = makeKey(hostname, port, connectHostname);
  const entry = pool.get(key);
  if (entry) {
    pool.delete(key);
    entry.client.close().catch(() => {
    });
  }
}
function clearPool() {
  for (const entry of pool.values()) {
    entry.client.close().catch(() => {
    });
  }
  pool.clear();
}
export {
  clearPool,
  getPooledClient,
  poolClient,
  removePooled
};
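The pool is keyed by `hostname:port` (optionally suffixed with the connect hostname) and evicts on TTL expiry, exhausted stream capacity, LRU overflow past 20 entries, and GOAWAY. Below is a minimal sketch of the check-then-dial pattern these functions appear built for; the `connectH2` dialer and the import paths are assumptions, not part of the package's documented API.

```ts
import { getPooledClient, poolClient } from "stealth-fetch-plus/dist/connection-pool.js";
import type { Http2Client } from "stealth-fetch-plus/dist/http2/client.js";

// Hypothetical dialer; stands in for the package's TCP + TLS + H2 handshake code.
declare function connectH2(hostname: string, port: number): Promise<Http2Client>;

async function acquireClient(hostname: string, port: number): Promise<Http2Client> {
  // getPooledClient returns null on a miss, and also evicts-and-misses when the
  // entry sat idle past POOL_TTL (60 s) or the connection has no stream capacity.
  const pooled = getPooledClient(hostname, port);
  if (pooled) return pooled;
  const fresh = await connectH2(hostname, port);
  // poolClient registers a GOAWAY handler once per client (WeakSet-guarded),
  // so a server-initiated shutdown removes the entry automatically.
  poolClient(hostname, port, fresh);
  return fresh;
}
```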
+++ package/dist/dns-cache.d.ts
@@ -0,0 +1,25 @@
import { CfCheckResult } from './socket/nat64.js';

/**
 * V8 isolate-level DNS cache for CF CDN detection results.
 * Caches resolveAndCheckCloudflare() results (IPv4/IPv6 + isCf flag)
 * to avoid repeated DoH queries for the same hostname.
 *
 * Uses the same LRU pattern as protocol-cache.ts (Map delete+re-insert).
 * TTL is derived from DNS response TTL values (clamped to 30s–5min).
 */

/**
 * Look up cached DNS result. Returns null on miss or expiry.
 * On hit, refreshes LRU position (Map delete+re-insert).
 */
declare function getCachedDns(hostname: string): CfCheckResult | null;
/**
 * Store DNS result in cache with clamped TTL.
 * Evicts oldest entry if at capacity.
 */
declare function setCachedDns(hostname: string, result: CfCheckResult): void;
/** Clear all cached DNS entries. Exported for testing and user API. */
declare function clearDnsCache(): void;

export { clearDnsCache, getCachedDns, setCachedDns };
+++ package/dist/dns-cache.js
@@ -0,0 +1,44 @@
const MIN_TTL = 3e4;
const MAX_TTL = 5 * 6e4;
const MAX_ENTRIES = 500;
const cache = /* @__PURE__ */ new Map();
function getCachedDns(hostname) {
  const entry = cache.get(hostname);
  if (!entry) return null;
  if (Date.now() > entry.expiresAt) {
    cache.delete(hostname);
    return null;
  }
  cache.delete(hostname);
  cache.set(hostname, entry);
  return {
    isCf: entry.isCf,
    ipv4: entry.ipv4,
    ipv6: entry.ipv6,
    dnsMs: 0,
    // cached — no actual DNS query
    ttl: Math.max(0, Math.round((entry.expiresAt - Date.now()) / 1e3))
  };
}
function setCachedDns(hostname, result) {
  const ttlMs = result.ttl > 0 ? Math.max(MIN_TTL, Math.min(result.ttl * 1e3, MAX_TTL)) : 1e4;
  if (cache.size >= MAX_ENTRIES && !cache.has(hostname)) {
    const firstKey = cache.keys().next().value;
    if (firstKey) cache.delete(firstKey);
  }
  cache.set(hostname, {
    ipv4: result.ipv4,
    ipv6: result.ipv6,
    isCf: result.isCf,
    expiresAt: Date.now() + ttlMs,
    dnsMs: result.dnsMs
  });
}
function clearDnsCache() {
  cache.clear();
}
export {
  clearDnsCache,
  getCachedDns,
  setCachedDns
};
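The TTL clamp is worth spelling out: `Math.max(MIN_TTL, Math.min(result.ttl * 1000, MAX_TTL))` maps a 7 s upstream TTL to 30 s and a 15 min TTL down to 5 min, while `ttl <= 0` gets a flat 10 s. A small sketch follows, under the assumption that `CfCheckResult` carries exactly the fields dns-cache.js reads and writes; the import paths and sample values are illustrative only.

```ts
import { getCachedDns, setCachedDns } from "stealth-fetch-plus/dist/dns-cache.js";
import type { CfCheckResult } from "stealth-fetch-plus/dist/socket/nat64.js";

// Field shape inferred from dns-cache.js; the addresses and timings are made up.
const fresh = {
  isCf: true,
  ipv4: "104.16.132.229",
  ipv6: "2606:4700::6810:84e5",
  dnsMs: 12,
  ttl: 7, // 7 s upstream TTL is clamped up to MIN_TTL, so it is cached for 30 s
} as CfCheckResult;
setCachedDns("example.com", fresh);

// A hit refreshes the LRU position (delete + re-insert) and reports the
// remaining TTL in seconds; dnsMs is 0 because no DoH query was performed.
console.log(getCachedDns("example.com")); // ttl starts near 30 and counts down
```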
+++ package/dist/http1/chunked.d.ts
@@ -0,0 +1,35 @@
import { Buffer } from 'node:buffer';

/**
 * HTTP/1.1 chunked transfer encoding decoder.
 * Decodes chunked body data into raw content.
 *
 * Chunked format:
 *   <hex-size>\r\n
 *   <data>\r\n
 *   ...
 *   0\r\n
 *   \r\n
 */

/**
 * Stateful chunked transfer encoding decoder.
 * Feed raw data via feed(), collect decoded chunks via getChunks().
 */
declare class ChunkedDecoder {
  private state;
  private buffer;
  private currentChunkSize;
  private chunks;
  private _done;
  /** Whether all chunks have been received (final 0-length chunk) */
  get done(): boolean;
  /** Feed raw data into the decoder */
  feed(data: Buffer | Uint8Array): void;
  /** Get and clear decoded chunks */
  getChunks(): Buffer[];
  private process;
  private findCRLF;
}

export { ChunkedDecoder };
+++ package/dist/http1/chunked.js
@@ -0,0 +1,87 @@
import { Buffer } from "node:buffer";
var ChunkedState = /* @__PURE__ */ ((ChunkedState2) => {
  ChunkedState2[ChunkedState2["READ_SIZE"] = 0] = "READ_SIZE";
  ChunkedState2[ChunkedState2["READ_DATA"] = 1] = "READ_DATA";
  ChunkedState2[ChunkedState2["READ_DATA_CRLF"] = 2] = "READ_DATA_CRLF";
  ChunkedState2[ChunkedState2["DONE"] = 3] = "DONE";
  return ChunkedState2;
})(ChunkedState || {});
class ChunkedDecoder {
  state = 0 /* READ_SIZE */;
  buffer = Buffer.alloc(0);
  currentChunkSize = 0;
  chunks = [];
  _done = false;
  /** Whether all chunks have been received (final 0-length chunk) */
  get done() {
    return this._done;
  }
  /** Feed raw data into the decoder */
  feed(data) {
    const buf = Buffer.isBuffer(data) ? data : Buffer.from(data);
    this.buffer = this.buffer.length > 0 ? Buffer.concat([this.buffer, buf]) : buf;
    this.process();
  }
  /** Get and clear decoded chunks */
  getChunks() {
    const result = this.chunks;
    this.chunks = [];
    return result;
  }
  process() {
    while (this.buffer.length > 0 && !this._done) {
      switch (this.state) {
        case 0 /* READ_SIZE */: {
          const crlfIdx = this.findCRLF();
          if (crlfIdx === -1) return;
          const sizeLine = this.buffer.subarray(0, crlfIdx).toString("ascii");
          const semiIdx = sizeLine.indexOf(";");
          const sizeStr = semiIdx === -1 ? sizeLine : sizeLine.substring(0, semiIdx);
          this.currentChunkSize = parseInt(sizeStr.trim(), 16);
          if (isNaN(this.currentChunkSize) || this.currentChunkSize < 0) {
            throw new Error(`Invalid chunk size: "${sizeStr.trim()}"`);
          }
          if (this.currentChunkSize > 16 * 1024 * 1024) {
            throw new Error(`Chunk size too large: ${this.currentChunkSize}`);
          }
          this.buffer = this.buffer.subarray(crlfIdx + 2);
          if (this.currentChunkSize === 0) {
            this._done = true;
            return;
          }
          this.state = 1 /* READ_DATA */;
          break;
        }
        case 1 /* READ_DATA */: {
          if (this.buffer.length < this.currentChunkSize) return;
          this.chunks.push(this.buffer.subarray(0, this.currentChunkSize));
          this.buffer = this.buffer.subarray(this.currentChunkSize);
          this.state = 2 /* READ_DATA_CRLF */;
          break;
        }
        case 2 /* READ_DATA_CRLF */: {
          if (this.buffer.length < 2) return;
          if (this.buffer[0] !== 13 || this.buffer[1] !== 10) {
            throw new Error("Expected CRLF after chunk data");
          }
          this.buffer = this.buffer.subarray(2);
          this.state = 0 /* READ_SIZE */;
          break;
        }
        case 3 /* DONE */:
          return;
      }
    }
  }
  findCRLF() {
    for (let i = 0; i < this.buffer.length - 1; i++) {
      if (this.buffer[i] === 13 && this.buffer[i + 1] === 10) {
        return i;
      }
    }
    return -1;
  }
}
export {
  ChunkedDecoder
};
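Because the decoder is stateful, it can be fed arbitrary TCP-read boundaries and only emits complete chunks. A self-contained example (the module path is an assumption), splitting a two-chunk body mid-chunk:

```ts
import { Buffer } from "node:buffer";
import { ChunkedDecoder } from "stealth-fetch-plus/dist/http1/chunked.js";

// "Wikipedia" as two chunks (4 + 5 bytes) followed by the 0-length terminator.
const wire = Buffer.from("4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n", "ascii");

const decoder = new ChunkedDecoder();
decoder.feed(wire.subarray(0, 6));       // "4\r\nWik": size line parsed, data incomplete
console.log(decoder.getChunks().length); // 0 (nothing emitted yet)

decoder.feed(wire.subarray(6));          // remainder of the stream
const body = Buffer.concat(decoder.getChunks());
console.log(body.toString());            // "Wikipedia"
console.log(decoder.done);               // true (final 0-size chunk consumed)
```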
+++ package/dist/http1/client.d.ts
@@ -0,0 +1,28 @@
import { Duplex } from 'node:stream';

interface Http1Request {
  method: string;
  path: string;
  hostname: string;
  headers: Record<string, string>;
  body?: Uint8Array | ReadableStream<Uint8Array> | null;
  signal?: AbortSignal;
  /** Timeout waiting for response headers (ms) */
  headersTimeout?: number;
  /** Timeout waiting for response body data (ms) */
  bodyTimeout?: number;
}
interface Http1Response {
  status: number;
  statusText: string;
  headers: Record<string, string>;
  rawHeaders: Array<[string, string]>;
  protocol: "http/1.1";
  body: ReadableStream<Uint8Array>;
}
/**
 * Send an HTTP/1.1 request over a raw socket and return the response.
 */
declare function http1Request(socket: Duplex, request: Http1Request): Promise<Http1Response>;

export { type Http1Request, type Http1Response, http1Request };
+++ package/dist/http1/client.js
@@ -0,0 +1,289 @@
import { Buffer } from "node:buffer";
import { serializeHttp1Headers } from "../utils/headers.js";
import { parseResponseHead } from "./parser.js";
import { ChunkedDecoder } from "./chunked.js";
async function http1Request(socket, request) {
  if (request.signal?.aborted) {
    throw request.signal.reason ?? new DOMException("Aborted", "AbortError");
  }
  const reqHeaders = { ...request.headers };
  if (!reqHeaders["host"]) {
    reqHeaders["host"] = request.hostname;
  }
  if (!reqHeaders["connection"]) {
    reqHeaders["connection"] = "close";
  }
  if (!reqHeaders["user-agent"]) {
    reqHeaders["user-agent"] = "stealth-fetch/0.1";
  }
  if (reqHeaders["transfer-encoding"] && reqHeaders["content-length"]) {
    delete reqHeaders["content-length"];
  }
  const isStreamBody = request.body instanceof ReadableStream;
  if (isStreamBody) {
    if (!reqHeaders["transfer-encoding"]) {
      reqHeaders["transfer-encoding"] = "chunked";
    }
    delete reqHeaders["content-length"];
  } else if (request.body && !reqHeaders["content-length"]) {
    reqHeaders["content-length"] = String(request.body.byteLength);
  }
  const requestLine = `${request.method.toUpperCase()} ${request.path} HTTP/1.1\r
`;
  const headersStr = serializeHttp1Headers(reqHeaders);
  const head = `${requestLine + headersStr}\r
`;
  await writeToSocket(socket, Buffer.from(head, "utf-8"));
  if (isStreamBody && request.body instanceof ReadableStream) {
    const reader = request.body.getReader();
    try {
      while (true) {
        const { done, value } = await reader.read();
        if (done) break;
        const chunk = Buffer.from(value);
        await writeToSocket(socket, Buffer.from(`${chunk.byteLength.toString(16)}\r
`));
        await writeToSocket(socket, chunk);
        await writeToSocket(socket, Buffer.from("\r\n"));
      }
      await writeToSocket(socket, Buffer.from("0\r\n\r\n"));
    } catch (err) {
      reader.cancel(err).catch(() => {
      });
      throw err;
    } finally {
      reader.releaseLock();
    }
  } else if (request.body && !isStreamBody && request.body.byteLength > 0) {
    await writeToSocket(socket, Buffer.from(request.body));
  }
  return readResponse(socket, request.signal, request.headersTimeout, request.bodyTimeout);
}
function writeToSocket(socket, data) {
  return new Promise((resolve, reject) => {
    socket.write(data, (err) => {
      if (err) reject(err);
      else resolve();
    });
  });
}
function readResponse(socket, signal, headersTimeout, bodyTimeout) {
  return new Promise((resolve, reject) => {
    let headBuffer = Buffer.alloc(0);
    let headParsed = false;
    let parsed = null;
    let bodyBytesReceived = 0;
    let chunkedDecoder = null;
    const hasHeadersTimeout = typeof headersTimeout === "number" && headersTimeout > 0 && headersTimeout < Infinity;
    const hasBodyTimeout = typeof bodyTimeout === "number" && bodyTimeout > 0 && bodyTimeout < Infinity;
    let bodyController = null;
    let bodyStreamClosed = false;
    let headersTimer = null;
    let bodyTimer = null;
    let lastBodyActivity = 0;
    const bodyStream = new ReadableStream({
      start(controller) {
        bodyController = controller;
      },
      cancel() {
        bodyStreamClosed = true;
        cleanup();
      }
    });
    const closeBody = () => {
      if (bodyController && !bodyStreamClosed) {
        bodyStreamClosed = true;
        try {
          bodyController.close();
        } catch {
        }
      }
      clearBodyTimer();
    };
    const enqueueBody = (data) => {
      if (bodyController && !bodyStreamClosed) {
        try {
          bodyController.enqueue(data);
        } catch {
        }
      }
    };
    const cleanup = () => {
      socket.removeListener("data", onData);
      socket.removeListener("end", onEnd);
      socket.removeListener("error", onError);
      if (signal) signal.removeEventListener("abort", onAbort);
      clearHeadersTimer();
      clearBodyTimer();
    };
    const onAbort = () => {
      const err = signal?.reason ?? new DOMException("Aborted", "AbortError");
      if (!headParsed) {
        reject(err);
      } else if (bodyController && !bodyStreamClosed) {
        bodyStreamClosed = true;
        try {
          bodyController.error(err);
        } catch {
        }
      }
      cleanup();
      socket.destroy();
    };
    const clearHeadersTimer = () => {
      if (headersTimer) {
        clearTimeout(headersTimer);
        headersTimer = null;
      }
    };
    const clearBodyTimer = () => {
      if (bodyTimer) {
        clearTimeout(bodyTimer);
        bodyTimer = null;
      }
    };
    const onHeadersTimeout = () => {
      if (headParsed) return;
      const err = new DOMException(`Headers timeout after ${headersTimeout}ms`, "TimeoutError");
      reject(err);
      cleanup();
      socket.destroy();
    };
    const onBodyTimeoutCheck = () => {
      bodyTimer = null;
      if (!hasBodyTimeout) return;
      const elapsed = Date.now() - lastBodyActivity;
      if (elapsed >= bodyTimeout) {
        const err = new DOMException(`Body timeout after ${bodyTimeout}ms`, "TimeoutError");
        if (bodyController && !bodyStreamClosed) {
          bodyStreamClosed = true;
          try {
            bodyController.error(err);
          } catch {
          }
        }
        cleanup();
        socket.destroy();
        return;
      }
      bodyTimer = setTimeout(onBodyTimeoutCheck, bodyTimeout - elapsed);
    };
    const markBodyActivity = () => {
      if (!hasBodyTimeout) return;
      lastBodyActivity = Date.now();
      if (!bodyTimer) {
        bodyTimer = setTimeout(onBodyTimeoutCheck, bodyTimeout);
      }
    };
    const onData = (chunk) => {
      const buf = Buffer.isBuffer(chunk) ? chunk : Buffer.from(chunk);
      if (!headParsed) {
        headBuffer = headBuffer.length > 0 ? Buffer.concat([headBuffer, buf]) : buf;
        if (headBuffer.length > 81920) {
          reject(new Error("Response headers too large (>80KB)"));
          cleanup();
          return;
        }
        const result = parseResponseHead(headBuffer);
        if (!result) return;
        if (result.response.status === 100) {
          headBuffer = headBuffer.subarray(result.bodyStart);
          return;
        }
        headParsed = true;
        parsed = result.response;
        clearHeadersTimer();
        resolve({
          status: parsed.status,
          statusText: parsed.statusText,
          headers: parsed.headers,
          rawHeaders: parsed.rawHeaders,
          protocol: "http/1.1",
          body: bodyStream
        });
        const bodyData = headBuffer.subarray(result.bodyStart);
        headBuffer = Buffer.alloc(0);
        if (parsed.bodyMode === "chunked") {
          chunkedDecoder = new ChunkedDecoder();
        }
        if (hasBodyTimeout && !(parsed.bodyMode === "content-length" && parsed.contentLength === 0)) {
          markBodyActivity();
        }
        if (bodyData.length > 0) {
          processBodyData(bodyData);
        }
        if (parsed.bodyMode === "content-length" && parsed.contentLength === 0) {
          closeBody();
          cleanup();
        }
      } else {
        processBodyData(buf);
      }
    };
    const processBodyData = (data) => {
      if (!parsed) return;
      if (hasBodyTimeout) {
        markBodyActivity();
      }
      if (parsed.bodyMode === "chunked" && chunkedDecoder) {
        chunkedDecoder.feed(data);
        for (const chunk of chunkedDecoder.getChunks()) {
          enqueueBody(chunk);
        }
        if (chunkedDecoder.done) {
          closeBody();
          cleanup();
        }
      } else if (parsed.bodyMode === "content-length") {
        const remaining = parsed.contentLength - bodyBytesReceived;
        const toEnqueue = data.length <= remaining ? data : data.subarray(0, remaining);
        bodyBytesReceived += toEnqueue.length;
        enqueueBody(toEnqueue);
        if (bodyBytesReceived >= parsed.contentLength) {
          closeBody();
          cleanup();
        }
      } else {
        enqueueBody(data);
      }
    };
    const onEnd = () => {
      if (!headParsed) {
        reject(new Error("Connection closed before response headers received"));
      } else {
        closeBody();
      }
      cleanup();
    };
    const onError = (err) => {
      if (!headParsed) {
        reject(err);
      } else {
        if (bodyController && !bodyStreamClosed) {
          bodyStreamClosed = true;
          try {
            bodyController.error(err);
          } catch {
          }
        }
      }
      cleanup();
    };
    socket.on("data", onData);
    socket.on("end", onEnd);
    socket.on("error", onError);
    if (signal) {
      if (signal.aborted) {
        onAbort();
        return;
      }
      signal.addEventListener("abort", onAbort, { once: true });
    }
    if (hasHeadersTimeout) {
      headersTimer = setTimeout(onHeadersTimeout, headersTimeout);
    }
  });
}
export {
  http1Request
};
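For orientation, here is a plain-Node sketch of driving `http1Request` over a TCP socket. Using `node:net` is an illustrative stand-in; inside the package the Duplex presumably comes from the socket adapter or WASM TLS bridge listed above, and the import path is an assumption.

```ts
import { connect } from "node:net";
import process from "node:process";
import { http1Request } from "stealth-fetch-plus/dist/http1/client.js";

const socket = connect(80, "example.com");
await new Promise<void>((resolve, reject) => {
  socket.once("connect", resolve);
  socket.once("error", reject);
});

const res = await http1Request(socket, {
  method: "GET",
  path: "/",
  hostname: "example.com", // becomes the Host header unless one is supplied
  headers: { accept: "text/html" },
  headersTimeout: 10_000, // TimeoutError if the status line takes over 10 s
  bodyTimeout: 30_000,    // errors the body stream after 30 s of inactivity
});
console.log(res.status, res.headers["content-type"]);

// The body is a WHATWG ReadableStream of Uint8Array; drain it fully so the
// default "connection: close" can wind the socket down cleanly.
const reader = res.body.getReader();
for (;;) {
  const { done, value } = await reader.read();
  if (done) break;
  process.stdout.write(value);
}
```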