@simonyea/holysheep-cli 2.1.40 → 2.1.41
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/configure-worker.js +4491 -0
- package/dist/index.js +9591 -0
- package/dist/process-proxy-inject.js +117 -0
- package/package.json +20 -7
- package/.gitea/workflows/sanity.yml +0 -125
- package/scripts/check-tarball-size.js +0 -44
- package/src/commands/balance.js +0 -57
- package/src/commands/claude-proxy.js +0 -248
- package/src/commands/claude.js +0 -135
- package/src/commands/doctor.js +0 -282
- package/src/commands/login.js +0 -211
- package/src/commands/openclaw.js +0 -258
- package/src/commands/reset.js +0 -53
- package/src/commands/setup.js +0 -493
- package/src/commands/upgrade.js +0 -168
- package/src/commands/webui.js +0 -622
- package/src/index.js +0 -226
- package/src/tools/aider.js +0 -78
- package/src/tools/antigravity.js +0 -42
- package/src/tools/claude-code.js +0 -228
- package/src/tools/claude-process-proxy.js +0 -1030
- package/src/tools/codex.js +0 -254
- package/src/tools/continue.js +0 -146
- package/src/tools/cursor.js +0 -71
- package/src/tools/droid.js +0 -281
- package/src/tools/env-config.js +0 -185
- package/src/tools/gemini-cli.js +0 -82
- package/src/tools/hermes.js +0 -354
- package/src/tools/index.js +0 -13
- package/src/tools/openclaw-bridge.js +0 -987
- package/src/tools/openclaw.js +0 -925
- package/src/tools/opencode.js +0 -227
- package/src/tools/process-proxy-inject.js +0 -142
- package/src/utils/config.js +0 -54
- package/src/utils/shell.js +0 -342
- package/src/utils/which.js +0 -176
- package/src/webui/aionui-runtime-fetcher.js +0 -429
- package/src/webui/aionui-runtime.js +0 -139
- package/src/webui/aionui-wrapper.js +0 -734
- package/src/webui/configure-worker.js +0 -67
- package/src/webui/server.js +0 -1572
- package/src/webui/workspace-runtime.js +0 -288
- package/src/webui/workspace-store.js +0 -325
- /package/{src/webui → dist}/index.html +0 -0
- /package/{src/tools → dist}/pty-hermes-wrapper.py +0 -0
|
@@ -1,1030 +0,0 @@
|
|
|
1
|
-
#!/usr/bin/env node
|
|
2
|
-
'use strict'
|
|
3
|
-
|
|
4
|
-
const fs = require('fs')
|
|
5
|
-
const http = require('http')
|
|
6
|
-
const https = require('https')
|
|
7
|
-
const net = require('net')
|
|
8
|
-
const path = require('path')
|
|
9
|
-
const os = require('os')
|
|
10
|
-
const crypto = require('crypto')
|
|
11
|
-
const { URL } = require('url')
|
|
12
|
-
const fetch = global.fetch || require('node-fetch')
|
|
13
|
-
|
|
14
|
-
const HOLYSHEEP_DIR = path.join(os.homedir(), '.holysheep')
|
|
15
|
-
const CONFIG_PATH = path.join(HOLYSHEEP_DIR, 'claude-proxy.json')
|
|
16
|
-
const DEFAULT_PROXY_PORT = 14556
|
|
17
|
-
|
|
18
|
-
/** Create ~/.holysheep if it does not exist yet (recursive, idempotent). */
function ensureDir() {
  const alreadyThere = fs.existsSync(HOLYSHEEP_DIR)
  if (alreadyThere) return
  fs.mkdirSync(HOLYSHEEP_DIR, { recursive: true })
}
|
|
21
|
-
|
|
22
|
-
/**
 * Read a claude-proxy JSON config file.
 *
 * @param {string} [configPath=CONFIG_PATH] - File to read. The original
 *   implementation took no parameter and always read the global CONFIG_PATH,
 *   silently ignoring the path some callers pass (createProcessProxyServer
 *   calls `readConfig(configPath)` with its own configPath option). Honoring
 *   the argument fixes that; zero-argument callers keep the old behavior via
 *   the default.
 * @returns {object} Parsed config object, or `{}` when the file is missing,
 *   unreadable, or not valid JSON (best-effort read by design).
 */
function readConfig(configPath = CONFIG_PATH) {
  try {
    return JSON.parse(fs.readFileSync(configPath, 'utf8'))
  } catch {
    // Missing/corrupt config is an expected state (fresh install) — fall back
    // to an empty config rather than crashing the proxy.
    return {}
  }
}
|
|
29
|
-
|
|
30
|
-
/** Persist the proxy config as pretty-printed JSON, creating ~/.holysheep first. */
function writeConfig(data) {
  ensureDir()
  const serialized = JSON.stringify(data, null, 2)
  fs.writeFileSync(CONFIG_PATH, serialized, 'utf8')
}
|
|
34
|
-
|
|
35
|
-
/**
 * Resolve the local process-proxy port: a positive integer from
 * `config.processProxyPort`, otherwise the default.
 */
function getProcessProxyPort(config = readConfig()) {
  const candidate = Number(config.processProxyPort)
  if (Number.isInteger(candidate) && candidate > 0) return candidate
  return DEFAULT_PROXY_PORT
}
|
|
39
|
-
|
|
40
|
-
/** Loopback URL of the local process proxy for the given (or configured) port. */
function getLocalProxyUrl(port = getProcessProxyPort()) {
  return 'http://127.0.0.1:' + port
}
|
|
43
|
-
|
|
44
|
-
/**
 * Base URL of the control plane, preferring `controlPlaneUrl` over the
 * legacy `relayUrl`, with any trailing slashes stripped. Empty string when
 * neither is configured.
 */
function getControlPlaneUrl(config) {
  const raw = config.controlPlaneUrl || config.relayUrl || ''
  return String(raw).replace(/\/+$/, '')
}
|
|
47
|
-
|
|
48
|
-
// Per-process cache of session leases keyed by sessionId. Written by
// fetchFreshLease, read by getCachedLease; there is no time-based eviction —
// entries are only replaced when a refresh is triggered by a failure.
const leaseCache = new Map()
// Retry budget for node-proxy forwards. NOTE(review): the consuming retry
// loop is below the visible portion of this file — confirm usage there.
const MAX_PROXY_RETRIES = 3
// Opt-in verbose timing log on stderr, enabled with HS_CLAUDE_TIMING_LOG=1.
const ENABLE_TIMING_LOG = process.env.HS_CLAUDE_TIMING_LOG === '1'
// Duration threshold (ms): lease-open / request-complete timing events are
// only emitted (via logProxyTiming, which itself requires ENABLE_TIMING_LOG)
// when the measured time reaches this value. Overridable via env.
const SLOW_PATH_LOG_MS = Number(process.env.HS_CLAUDE_SLOW_PATH_LOG_MS) || 5000
|
|
52
|
-
|
|
53
|
-
/**
 * Reduce a URL to protocol + host + path for logging (drops query/hash,
 * which may carry secrets). Non-URL input is returned as a plain string;
 * falsy input yields ''.
 */
function sanitizeUrl(value) {
  if (!value) return ''
  const text = String(value)
  try {
    const { protocol, host, pathname } = new URL(text)
    return `${protocol}//${host}${pathname}`
  } catch {
    return text
  }
}
|
|
62
|
-
|
|
63
|
-
/**
 * Emit a timing event to stderr when HS_CLAUDE_TIMING_LOG=1. Entries whose
 * value is undefined/null/'' are dropped so the JSON stays compact.
 */
function logProxyTiming(event, details = {}) {
  if (!ENABLE_TIMING_LOG) return
  const kept = Object.entries(details).filter(
    ([, value]) => value !== undefined && value !== null && value !== ''
  )
  const payload = Object.fromEntries(kept)
  console.error(`[hs-claude-proxy] ${event} ${JSON.stringify(payload)}`)
}
|
|
70
|
-
|
|
71
|
-
/**
 * Build the per-request trace object attached to timing logs: a short random
 * request id plus routing context (session, node, attempt, targets) and the
 * lease's open-latency/age when a lease is supplied.
 */
function createForwardTrace({ clientReq, targetUrl, nodeProxyUrl, sessionId, lease, attempt, isDirect }) {
  const leaseOpenedAt = lease?._hsLeaseOpenedAt
  const trace = {
    requestId: crypto.randomUUID().slice(0, 8),
    sessionId,
    nodeId: lease?.nodeId || '',
    attempt,
    isDirect,
    method: clientReq?.method || '',
    target: sanitizeUrl(targetUrl),
    nodeProxy: sanitizeUrl(nodeProxyUrl),
    leaseOpenMs: lease?._hsLeaseOpenMs,
    leaseAgeMs: leaseOpenedAt ? Date.now() - leaseOpenedAt : undefined,
  }
  return trace
}
|
|
85
|
-
|
|
86
|
-
/**
 * Wrap an upstream HTTP failure in an Error carrying `statusCode` (502 when
 * the code is not numeric) and the full `body`; the message holds the first
 * 200 chars of the body.
 */
function createForwardError(statusCode, body) {
  const bodyText = String(body || '')
  const error = new Error(`HTTP ${statusCode}: ${bodyText.slice(0, 200)}`)
  error.statusCode = Number(statusCode) || 502
  error.body = bodyText
  return error
}
|
|
92
|
-
|
|
93
|
-
/**
 * Serialize an Anthropic-style error body of type `client_validation_error`,
 * used when a local forward fails before any upstream response exists.
 */
function createClientValidationErrorBody(message) {
  const envelope = {
    type: 'error',
    error: {
      type: 'client_validation_error',
      message,
    },
  }
  return JSON.stringify(envelope)
}
|
|
102
|
-
|
|
103
|
-
// [HolySheep fork v2.1.36 / hs24] This is the ONLY trigger for switching to a
// different node (via fetchFreshLease({forceReassign:true}) → control plane
// drops Redis stickiness → chooseNode round-robins). It is NOT a generic retry
// trigger — same-node retries don't need this.
//
// Previous versions returned true for any 403/503, which made the CLI tear
// down 5-min session stickiness on every transient upstream hiccup (busy
// account, momentary scheduler capacity, ticket timing) and bounced the
// session node3↔node4↔node3 repeatedly. Bug surfaced as repeated lease.close
// log lines and ~5s stalls.
//
// Fix: only force-reassign when upstream sends an EXPLICIT "switch node"
// signal in the body. Other errors (403/503/ECONNREFUSED) stay on the same
// node — the retry loop will either succeed after upstream recovers, or fall
// through to direct-https fallback.
//
// IMPORTANT: we deliberately do NOT match bare `client_validation_error` —
// forwardViaNodeProxy wraps local forward errors (ECONNREFUSED etc.) in a
// client_validation_error body (see createClientValidationErrorBody), so
// matching that type alone would fire false positives on local-level errors
// that have nothing to do with node health. We only match specific messages
// that upstream emits when it explicitly wants the CLI to pick a new node:
//   - homi-crs internalRoutes.js:1025 ("当前代理节点 X 不可用")
//   - homi-crs auth.js / internalRoutes.js ("Claude Code 必须使用 hs claude 指令启动")
//   - control plane server.js ("No active Claude relay nodes are available")
function shouldRefreshLeaseAfterError(err) {
  const text = String(err?.body || err?.message || '')
  const explicitSwitchSignals = [
    'Claude Code 必须使用 hs claude 指令启动',
    '当前代理节点',
    'No active Claude relay nodes are available',
  ]
  return explicitSwitchSignals.some((signal) => text.includes(signal))
}
|
|
136
|
-
|
|
137
|
-
/**
 * Decide whether a node-proxy forward error is worth retrying on the SAME
 * node (connection-level failures, transient upstream 4xx/5xx, capacity
 * messages, local timeouts). Substring match against the error message.
 */
function isRetryableNodeLeaseError(err) {
  const message = String(err?.message || '')
  const retryableTokens = [
    'No session lease',
    'ECONNREFUSED',
    'ECONNRESET',
    'socket hang up',
    'ETIMEDOUT',
    'EHOSTUNREACH',
    'ENETUNREACH',
    'Upstream proxy CONNECT failed',
    'No available Claude accounts support the requested model',
    'No available Claude accounts',
    'Bad Gateway',
    'HTTP 402',
    'HTTP 403',
    'HTTP 502',
    'HTTP 503',
    'HTTP 504',
    'client_validation_error',
    'upstream response timeout',
    'upstream stream stalled',
    '不可用',
  ]
  for (const token of retryableTokens) {
    if (message.includes(token)) return true
  }
  return false
}
|
|
162
|
-
|
|
163
|
-
/**
 * Drain an async-iterable of chunks and parse the concatenation as JSON.
 * Returns null for an empty stream or unparseable content.
 */
async function readJsonResponse(response) {
  const buffers = []
  for await (const piece of response) {
    buffers.push(Buffer.from(piece))
  }
  if (buffers.length === 0) return null
  const text = Buffer.concat(buffers).toString('utf8')
  try {
    return JSON.parse(text)
  } catch {
    return null
  }
}
|
|
173
|
-
|
|
174
|
-
// Request a fresh lease from the relay (at startup + passive retry after
// CONNECT failures).
//
// POSTs /session/open on the control plane with the bridge identity; on
// success it annotates the lease with open-latency metadata
// (_hsLeaseOpenMs/_hsLeaseOpenedAt, consumed by createForwardTrace), logs a
// slow-path event when opening took >= SLOW_PATH_LOG_MS, caches the lease by
// sessionId, and returns it. Throws when the control plane is unconfigured
// or the response lacks success/ticket.
// `options.forceReassign` asks the control plane to drop node stickiness —
// see shouldRefreshLeaseAfterError for when that is allowed.
async function fetchFreshLease(config, sessionId, options = {}) {
  const controlPlaneUrl = getControlPlaneUrl(config)
  if (!controlPlaneUrl) throw new Error('Claude relay control plane is not configured')

  const startedAt = Date.now()
  const response = await fetch(`${controlPlaneUrl}/session/open`, {
    method: 'POST',
    headers: { 'content-type': 'application/json' },
    body: JSON.stringify({
      sessionId,
      bridgeId: config.bridgeId || 'local-bridge',
      deviceId: config.deviceId || '',
      installSource: config.installSource || 'holysheep-cli',
      proxyMode: 'claude-process',
      forceReassign: options.forceReassign === true,
    }),
  })

  // Tolerate a non-JSON error body; the !payload checks below handle it.
  const payload = await response.json().catch(() => null)
  if (!response.ok || !payload?.success || !payload?.data?.ticket) {
    throw new Error(payload?.error?.message || `Failed to open Claude session (HTTP ${response.status})`)
  }
  const openedAt = Date.now()
  payload.data._hsLeaseOpenMs = openedAt - startedAt
  payload.data._hsLeaseOpenedAt = openedAt
  if (payload.data._hsLeaseOpenMs >= SLOW_PATH_LOG_MS) {
    logProxyTiming('lease.open', {
      sessionId,
      nodeId: payload.data.nodeId || '',
      leaseOpenMs: payload.data._hsLeaseOpenMs,
      forceReassign: options.forceReassign === true,
    })
  }
  leaseCache.set(sessionId, payload.data)
  return payload.data
}
|
|
211
|
-
|
|
212
|
-
// Request path: cache read only — no expiry check (renewal is triggered by
// failures, not by time).
function getCachedLease(sessionId) {
  const lease = leaseCache.get(sessionId)
  if (lease) return lease
  throw new Error('No session lease available')
}
|
|
218
|
-
|
|
219
|
-
/** Bridge-authentication headers (x-hs-*) derived from config + lease. */
function buildAuthHeaders(config, lease) {
  const headers = {}
  headers['x-hs-bridge-id'] = config.bridgeId || 'local-bridge'
  headers['x-hs-device-id'] = config.deviceId || ''
  headers['x-hs-install-source'] = config.installSource || 'holysheep-cli'
  headers['x-hs-session-id'] = lease.sessionId
  headers['x-hs-bridge-ticket'] = lease.ticket
  headers['x-hs-node-id'] = lease.nodeId || ''
  return headers
}
|
|
229
|
-
|
|
230
|
-
// [HolySheep fork v2.1.29 / hs22] Rewrite User-Agent away from claude-cli/*.
// CRS enforces a "Claude Code CLI must use hs claude" policy keyed on UA —
// when it sees claude-cli/* or claude-code/*, it 403s even with valid bridge
// headers, under the assumption the user bypassed the wrapper. Every forward
// path (node-proxy + direct-https fallback) normalises UA to holysheep-cli so
// the wrapper identity is preserved end-to-end. Proxy-revealing headers
// (x-forwarded-*, x-real-ip, forwarded, via) are stripped entirely.
function sanitizeClaudeClientHeaders(headers, config) {
  const proxyRevealing = /^x-forwarded-|^x-real-ip$|^forwarded$|^via$/i
  const sanitized = {}
  for (const [name, value] of Object.entries(headers || {})) {
    if (proxyRevealing.test(name)) continue
    if (/^user-agent$/i.test(name) && /claude-cli|claude-code/i.test(String(value || ''))) {
      sanitized[name] = `holysheep-cli/${(config && config.cliVersion) || 'process-proxy'} (hs-claude-proxy)`
      continue
    }
    sanitized[name] = value
  }
  return sanitized
}
|
|
249
|
-
|
|
250
|
-
/**
 * Resolve the node's forward-proxy URL from a lease.
 *
 * Precedence: HS_CLAUDE_NODE_PROXY_OVERRIDE env var, then the lease's
 * explicit `nodeProxyUrl` field, then (legacy leases only) a port inferred
 * from `nodeBaseUrl`.
 *
 * [HolySheep fork v2.1.34 / hs23] "field present but empty" (sub2api nodes
 * have no process-proxy) is distinguished from "field absent" (old lease
 * format): an explicit "" returns null so callers skip node-proxy and go
 * straight to direct-https; only an absent field falls through to the
 * legacy :3101→:3129 port inference.
 */
function deriveNodeProxyUrl(lease) {
  const override = process.env.HS_CLAUDE_NODE_PROXY_OVERRIDE
  if (override) return String(override).replace(/\/+$/, '')

  if (Object.prototype.hasOwnProperty.call(lease, 'nodeProxyUrl')) {
    // Explicit empty = no proxy on this node.
    return lease.nodeProxyUrl ? String(lease.nodeProxyUrl) : null
  }

  // Legacy fallback: field absent → infer the proxy port from nodeBaseUrl.
  if (!lease.nodeBaseUrl) return null
  const derived = new URL(String(lease.nodeBaseUrl))
  const inferredPort = derived.port === '3101' ? '3129' : derived.port
  derived.port = inferredPort || '3129'
  derived.pathname = ''
  derived.search = ''
  derived.hash = ''
  return derived.toString().replace(/\/+$/, '')
}
|
|
273
|
-
|
|
274
|
-
// [HolySheep fork v2.1.27 / hs20] Direct HTTPS fallback.
//
// Background: the assigned node's forward-proxy at :3129 can be unreachable
// (we've seen node3's proxy offline while node3's /v1/messages at :3101 is
// up). When that happens, claude-agent-acp spawns get ECONNREFUSED on every
// prompt and the user sees the generic "Internal error: API Error: 400".
//
// Fix: when the node-proxy forward fails with ECONNREFUSED (or the proxy
// URL itself won't resolve), retry the same request directly against
// `config.baseUrlAnthropic` (usually https://api.holysheep.ai). We still
// send the bridge headers — the upstream CRS accepts them and reuses the
// same lease. This gives users a usable claude path even while HolySheep
// SRE fixes the assigned node.
//
// @param {object} opts.config  - proxy config (baseUrlAnthropic, bridge ids)
// @param {object} opts.lease   - active session lease (for auth headers)
// @param {object} opts.trace   - accepted for interface parity; not used here
// @returns {Promise<void>} resolves when the upstream response has ended
function forwardDirectHttps({ config, lease, clientReq, clientRes, trace }) {
  const crsBase = config.baseUrlAnthropic || 'https://api.holysheep.ai'
  // `new URL(input, base)` ignores `base` when `input` is already absolute,
  // so a single call covers both relative paths and absolute client URLs.
  // (The previous code had a ternary whose two branches were identical.)
  const target = new URL(clientReq.url, crsBase)
  return new Promise((resolve, reject) => {
    // [HolySheep fork v2.1.27 / hs20] Sanitize headers before the direct
    // upstream hit. claude-agent-acp usually runs in a clean env so these
    // should never be present, but a future client library could set them
    // (or a proxy earlier in the chain could tag the request) and leak the
    // loopback origin / real client IP to api.holysheep.ai. The upstream
    // doesn't need them — it authenticates off bridge headers + API key.
    const sanitized = sanitizeClaudeClientHeaders(clientReq.headers, config)
    const headers = {
      ...sanitized,
      ...buildAuthHeaders(config, lease),
      host: target.host,
      connection: 'close',
    }
    // Uses the module-level `https` require; the previous inner
    // `const https = require('https')` shadowed it redundantly.
    const upReq = https.request({
      hostname: target.hostname,
      port: target.port || 443,
      path: target.pathname + target.search,
      method: clientReq.method,
      headers,
      // Slightly longer than the node-proxy first-byte budget so the
      // fallback does not give up before the primary path would have.
      timeout: RESPONSE_TIMEOUT_MS + 5000,
    }, (upRes) => {
      clientRes.writeHead(upRes.statusCode, upRes.headers)
      upRes.pipe(clientRes)
      upRes.on('end', resolve)
      upRes.on('error', reject)
    })
    upReq.on('error', reject)
    upReq.on('timeout', () => {
      upReq.destroy(new Error('direct-https upstream timeout'))
    })
    clientReq.pipe(upReq)
  })
}
|
|
325
|
-
|
|
326
|
-
/**
 * Forward one client request through the node's forward-proxy (plain HTTP,
 * absolute-URI request-target).
 *
 * Resolves when the upstream response has been streamed to `clientRes`;
 * rejects with a createForwardError on upstream 403/502/503 (body buffered
 * for retry-decision logic) or with a wrapped client_validation_error on
 * local failures/timeouts. Exactly one settle is guaranteed via `settled`.
 *
 * Two timers guard the stream:
 *   - response timer: upstream must produce headers within RESPONSE_TIMEOUT_MS;
 *   - stall timer: after the first byte, adjacent chunks must arrive within
 *     STALL_TIMEOUT_MS (re-armed on every data event).
 */
function forwardViaNodeProxy({ nodeProxyUrl, targetUrl, clientReq, clientRes, extraHeaders = {}, trace = null }) {
  const upstream = new URL(nodeProxyUrl)
  return new Promise((resolve, reject) => {
    const requestStartedAt = Date.now()
    // Callers that already built a trace (with lease info) pass it in; the
    // fallback trace here has no lease and attempt 0.
    const resolvedTrace = trace || createForwardTrace({
      clientReq,
      targetUrl,
      nodeProxyUrl,
      sessionId: clientReq.headers['x-hs-session-id'] || '',
      lease: null,
      attempt: 0,
      isDirect: !String(clientReq.url || '').startsWith('http'),
    })
    let settled = false
    let responseTimer = null
    let stallTimer = null
    let sawUpstreamResponse = false
    let sawUpstreamData = false
    let forwardReq = null
    let upstreamAssignedAt = null
    let firstByteAt = null

    const clearResponseTimer = () => {
      if (responseTimer) {
        clearTimeout(responseTimer)
        responseTimer = null
      }
    }

    const clearStallTimer = () => {
      if (stallTimer) {
        clearTimeout(stallTimer)
        stallTimer = null
      }
    }

    const clearTimers = () => {
      clearResponseTimer()
      clearStallTimer()
    }

    // Successful settle: log slow completions, then resolve. Idempotent.
    const finish = () => {
      if (settled) return
      settled = true
      clearTimers()
      const finishedAt = Date.now()
      const totalMs = finishedAt - requestStartedAt
      if (totalMs >= SLOW_PATH_LOG_MS) {
        logProxyTiming('request.complete', {
          ...resolvedTrace,
          totalMs,
          connectMs: upstreamAssignedAt ? upstreamAssignedAt - requestStartedAt : undefined,
          firstByteMs: firstByteAt ? firstByteAt - requestStartedAt : undefined,
          streamMs: firstByteAt ? finishedAt - firstByteAt : undefined,
          bytesStarted: sawUpstreamData,
        })
      }
      resolve()
    }

    // Failure settle: tear down the in-flight upstream request and reject
    // with an error whose .body is an Anthropic-shaped validation error so
    // callers can relay it to the client verbatim. Idempotent.
    const failWithAnthropicError = (message) => {
      if (settled) return
      settled = true
      clearTimers()
      const failedAt = Date.now()
      logProxyTiming('request.fail', {
        ...resolvedTrace,
        totalMs: failedAt - requestStartedAt,
        connectMs: upstreamAssignedAt ? upstreamAssignedAt - requestStartedAt : undefined,
        firstByteMs: firstByteAt ? firstByteAt - requestStartedAt : undefined,
        bytesStarted: sawUpstreamData,
        error: message,
      })
      if (forwardReq && !forwardReq.destroyed) {
        forwardReq.destroy()
      }
      const error = createForwardError(400, createClientValidationErrorBody(message))
      // Keep the raw message for retry-token matching; the wrapped body
      // stays on error.body.
      error.message = message
      reject(error)
    }

    const fail = (error) => {
      const err = error instanceof Error ? error : new Error(String(error || 'Proxy error'))
      return failWithAnthropicError(err.message || 'Proxy error')
    }

    const armResponseTimer = () => {
      if (settled || sawUpstreamResponse || responseTimer) return
      responseTimer = setTimeout(() => failWithAnthropicError('upstream response timeout'), RESPONSE_TIMEOUT_MS)
    }

    // Re-armed on every upstream data chunk; firing means the stream stalled.
    const armStallTimer = () => {
      if (settled) return
      sawUpstreamData = true
      clearStallTimer()
      stallTimer = setTimeout(() => failWithAnthropicError('upstream stream stalled'), STALL_TIMEOUT_MS)
    }

    // [HolySheep fork v2.1.29 / hs22] Same UA-rewrite logic as forwardDirectHttps.
    // Even when we go through node4:3129, the forward-proxy delivers the UA
    // verbatim to upstream CRS; keeping claude-cli/* gets us 403.
    // `extraHeaders` already contains bridge auth via buildAuthHeaders.
    // `config` may be undefined here because forwardViaNodeProxy doesn't take
    // one; use a minimal stub — cliVersion is optional.
    const finalHeaders = {
      ...sanitizeClaudeClientHeaders(clientReq.headers, { cliVersion: undefined }),
      ...extraHeaders,
      host: targetUrl.host,
      connection: 'close',
    }
    // Build the forward-proxy path. CRS's per-node proxy at :3129 accepts an
    // absolute URL as the HTTP/1.1 request target and rewrites the scheme
    // internally. We ALWAYS send http:// in the path (even if targetUrl is
    // https://) because:
    //   1. node:3129 listens on plain HTTP and ignores the scheme of the
    //      request-target — it reads `targetUrl = new URL(req.url)`,
    //      validates hostname, and proxies upstream.
    //   2. bun's `http.request` treats `path: 'https://...'` as a signal to
    //      issue a CONNECT tunnel (instead of a plain absolute-URI forward
    //      request), which silently drops the x-hs-* headers we injected
    //      because they would only ride on the outer CONNECT, not the inner
    //      TLS tunnel — yielding 403 'Missing bridge session headers'.
    //      (Node's `http.request` does NOT have this behaviour; the bug is
    //      bun-specific. Reproduced under bun 1.3.9 at AionUi's runtime.)
    //      Rewriting to http:// preserves correctness on both runtimes.
    const forwardPath = (() => {
      const clone = new URL(targetUrl.toString())
      clone.protocol = 'http:'
      return clone.toString()
    })()
    if (ENABLE_TIMING_LOG) {
      const hsHeaders = Object.fromEntries(
        Object.entries(finalHeaders).filter(([k]) => /^x-hs-/i.test(k))
      )
      console.error(
        `[hs-claude-proxy] forward.headers ${JSON.stringify({
          target: sanitizeUrl(targetUrl),
          forwardPath,
          node: sanitizeUrl(nodeProxyUrl),
          hsHeaders,
          clientHeadersKeys: Object.keys(clientReq.headers).slice(0, 20),
        })}`
      )
    }
    forwardReq = http.request({
      host: upstream.hostname,
      port: Number(upstream.port || 80),
      method: clientReq.method,
      path: forwardPath,
      agent: false,
      headers: finalHeaders,
    }, (forwardRes) => {
      sawUpstreamResponse = true
      clearResponseTimer()

      const status = forwardRes.statusCode || 502
      // Error statuses the retry loop cares about: buffer the whole body so
      // the rejection error carries it (shouldRefreshLeaseAfterError /
      // isRetryableNodeLeaseError match on it). Nothing is written to the
      // client here — the caller decides whether to retry or surface it.
      if (status === 403 || status === 502 || status === 503) {
        const chunks = []
        forwardRes.on('data', (c) => chunks.push(c))
        forwardRes.on('end', () => {
          if (settled) return
          settled = true
          clearTimers()
          const body = Buffer.concat(chunks).toString('utf8')
          const error = createForwardError(status, body)
          logProxyTiming('request.upstream_error', {
            ...resolvedTrace,
            status,
            totalMs: Date.now() - requestStartedAt,
            body: String(body || '').slice(0, 200),
          })
          reject(error)
        })
        forwardRes.on('error', fail)
        return
      }

      // All other statuses stream straight through to the client.
      upstreamAssignedAt = upstreamAssignedAt || Date.now()
      clientRes.writeHead(status, forwardRes.headers)
      forwardRes.on('data', () => {
        if (!firstByteAt) {
          firstByteAt = Date.now()
          logProxyTiming('request.first_byte', {
            ...resolvedTrace,
            firstByteMs: firstByteAt - requestStartedAt,
          })
        }
        armStallTimer()
      })
      forwardRes.once('end', () => {
        clearStallTimer()
        finish()
      })
      forwardRes.once('close', () => {
        clearStallTimer()
        finish()
      })
      forwardRes.once('error', fail)
      forwardRes.pipe(clientRes)
    })

    // Track when a socket is assigned/connected for connectMs timing.
    forwardReq.on('socket', (socket) => {
      upstreamAssignedAt = upstreamAssignedAt || Date.now()
      socket.once('connect', () => {
        upstreamAssignedAt = upstreamAssignedAt || Date.now()
      })
    })
    forwardReq.setTimeout(RESPONSE_TIMEOUT_MS, () => {
      if (!sawUpstreamResponse) failWithAnthropicError('upstream response timeout')
    })
    forwardReq.once('error', fail)
    // Client went away: settle quietly as success so no retry is attempted.
    clientReq.once('aborted', () => finish())

    armResponseTimer()
    clientReq.pipe(forwardReq)
  })
}
|
|
543
|
-
|
|
544
|
-
/**
 * Open an HTTP CONNECT tunnel through `proxyUrl` to `target` (host:port).
 * Resolves with the raw tunnel socket (any early bytes are pushed back via
 * `unshift`); rejects on a non-200 CONNECT response, a request error, or a
 * 15 s handshake timeout.
 */
function createConnectTunnel(proxyUrl, target, headers) {
  const upstream = new URL(proxyUrl)
  return new Promise((resolve, reject) => {
    const connectReq = http.request({
      host: upstream.hostname,
      port: Number(upstream.port || 80),
      method: 'CONNECT',
      path: target,
      headers,
      agent: false,
      timeout: 15000, // 15 s CONNECT handshake timeout
    })

    connectReq.once('error', reject)
    connectReq.once('timeout', () => {
      connectReq.destroy(new Error('CONNECT tunnel handshake timeout'))
    })
    connectReq.once('connect', (response, socket, head) => {
      if (response.statusCode !== 200) {
        socket.destroy()
        reject(new Error(`Upstream proxy CONNECT failed (HTTP ${response.statusCode})`))
        return
      }
      // Preserve any bytes the proxy sent after the 200 line.
      if (head?.length) socket.unshift(head)
      resolve(socket)
    })
    connectReq.end()
  })
}
|
|
572
|
-
|
|
573
|
-
// Maximum time to wait for the upstream's first byte.
// We must not default this to ~5 minutes: when the upstream/proxy chain is
// wedged, every subsequent request would feel "hung" to the user. Fail fast
// at 30 s by default; still overridable via the environment variable.
const RESPONSE_TIMEOUT_MS = Number(process.env.HS_CLAUDE_RESPONSE_TIMEOUT_MS) || 30000
// Once the upstream stream has started, the maximum allowed gap between two
// adjacent chunks before the stream is considered stalled.
const STALL_TIMEOUT_MS = Number(process.env.HS_CLAUDE_STALL_TIMEOUT_MS) || 120000
|
|
579
|
-
|
|
580
|
-
/**
 * Bidirectionally pipe two sockets (client `a` ↔ upstream `b`) with shared
 * teardown: when either side errors or closes, both are destroyed and the
 * pending timer is cleared.
 *
 * A single timer slot covers two phases, keyed off `b` (the upstream side):
 *   - before the first upstream byte: RESPONSE_TIMEOUT_MS first-byte guard;
 *   - after data starts flowing: STALL_TIMEOUT_MS inter-chunk guard,
 *     re-armed on every `b` data event.
 */
function pipeWithCleanup(a, b) {
  // TCP keep-alive on both ends so dead peers are detected eventually.
  for (const sock of [a, b]) {
    if (typeof sock.setKeepAlive === 'function') sock.setKeepAlive(true, 10000)
  }

  let timer = null
  let closed = false
  let streaming = false

  const clearTimer = () => {
    if (timer) {
      clearTimeout(timer)
      timer = null
    }
  }

  const destroySocket = (sock, err) => {
    if (!sock || sock.destroyed) return
    if (err) sock.destroy(err)
    else sock.destroy()
  }

  // Idempotent teardown; a reason string becomes the destroy error.
  const close = (reason) => {
    if (closed) return
    closed = true
    clearTimer()
    const err = reason ? new Error(reason) : null
    destroySocket(a, err)
    destroySocket(b, err)
  }

  const armResponseTimer = () => {
    if (streaming || closed || timer) return
    timer = setTimeout(() => close('upstream response timeout'), RESPONSE_TIMEOUT_MS)
  }

  // First data flips to streaming mode and swaps the first-byte guard for
  // the stall guard; re-armed on every subsequent chunk.
  const armStallTimer = () => {
    if (closed) return
    streaming = true
    clearTimer()
    timer = setTimeout(() => close('upstream stream stalled'), STALL_TIMEOUT_MS)
  }

  b.on('data', armStallTimer)

  armResponseTimer()
  a.pipe(b)
  b.pipe(a)

  a.once('error', () => close())
  b.once('error', () => close())
  a.once('close', () => close())
  b.once('close', () => close())
}
|
|
634
|
-
|
|
635
|
-
function createProcessProxyServer({ sessionId, configPath = CONFIG_PATH, allowAnthropicConnect = false }) {
|
|
636
|
-
const server = http.createServer(async (clientReq, clientRes) => {
|
|
637
|
-
const isDirect = !clientReq.url.startsWith('http')
|
|
638
|
-
if (ENABLE_TIMING_LOG) {
|
|
639
|
-
console.error(
|
|
640
|
-
`[hs-claude-proxy] incoming ${JSON.stringify({
|
|
641
|
-
method: clientReq.method,
|
|
642
|
-
url: String(clientReq.url).slice(0, 120),
|
|
643
|
-
ua: String(clientReq.headers['user-agent'] || '').slice(0, 60),
|
|
644
|
-
host: clientReq.headers.host || '',
|
|
645
|
-
isDirect,
|
|
646
|
-
})}`
|
|
647
|
-
)
|
|
648
|
-
}
|
|
649
|
-
|
|
650
|
-
// [HolySheep fork v2.1.30 / hs22b] Buffer the incoming request body ONCE
|
|
651
|
-
// so retries + direct-https fallback can re-send it. Before this, each
|
|
652
|
-
// retry called clientReq.pipe(forwardReq) on a stream that had already
|
|
653
|
-
// been consumed by the previous attempt, making the second/third/...
|
|
654
|
-
// retry (and the fallback) hit CRS with an EMPTY body — upstream
|
|
655
|
-
// responded 400 "model is required" and the user saw no reply. Only
|
|
656
|
-
// the very first (often doomed-to-ECONNREFUSED) attempt had the real
|
|
657
|
-
// body; every retry+fallback arrived empty.
|
|
658
|
-
//
|
|
659
|
-
// Buffer into memory before any forward. Typical claude-agent-acp
|
|
660
|
-
// POST bodies are 5-40 KB so this is fine. Override from wire type
|
|
661
|
-
// to Buffer so downstream functions can pipe(from body stream)
|
|
662
|
-
// multiple times by wrapping a fresh Readable each time.
|
|
663
|
-
let bufferedBody = null
|
|
664
|
-
if (clientReq.method && !['GET', 'HEAD', 'OPTIONS'].includes(clientReq.method.toUpperCase())) {
|
|
665
|
-
try {
|
|
666
|
-
const chunks = []
|
|
667
|
-
for await (const chunk of clientReq) chunks.push(chunk)
|
|
668
|
-
bufferedBody = Buffer.concat(chunks)
|
|
669
|
-
} catch (e) {
|
|
670
|
-
// client disconnected mid-read; bail out early.
|
|
671
|
-
if (!clientRes.headersSent) {
|
|
672
|
-
clientRes.writeHead(400, { 'content-type': 'text/plain' })
|
|
673
|
-
clientRes.end('client disconnected while uploading body: ' + (e && e.message))
|
|
674
|
-
}
|
|
675
|
-
return
|
|
676
|
-
}
|
|
677
|
-
}
|
|
678
|
-
// Shadow `clientReq` with a wrapper that yields a fresh Readable body
|
|
679
|
-
// stream each time `.pipe()` is called. This keeps the rest of the
|
|
680
|
-
// handler unchanged (node-proxy + direct-fallback both still use
|
|
681
|
-
// `clientReq.pipe(upstream)`).
|
|
682
|
-
const { Readable } = require('stream')
|
|
683
|
-
const makeBodyStream = () => {
|
|
684
|
-
if (bufferedBody === null) return null
|
|
685
|
-
const r = new Readable({ read() {} })
|
|
686
|
-
r.push(bufferedBody)
|
|
687
|
-
r.push(null)
|
|
688
|
-
return r
|
|
689
|
-
}
|
|
690
|
-
const originalClientReq = clientReq
|
|
691
|
-
clientReq = new Proxy(originalClientReq, {
|
|
692
|
-
get(target, prop, receiver) {
|
|
693
|
-
if (prop === 'pipe') {
|
|
694
|
-
return (dest, opts) => {
|
|
695
|
-
const s = makeBodyStream()
|
|
696
|
-
if (!s) {
|
|
697
|
-
// GET/HEAD — just end the dest
|
|
698
|
-
if (dest && typeof dest.end === 'function') dest.end()
|
|
699
|
-
return dest
|
|
700
|
-
}
|
|
701
|
-
return s.pipe(dest, opts)
|
|
702
|
-
}
|
|
703
|
-
}
|
|
704
|
-
return Reflect.get(target, prop, receiver)
|
|
705
|
-
},
|
|
706
|
-
})
|
|
707
|
-
|
|
708
|
-
const doForward = async (lease, attempt) => {
|
|
709
|
-
const config = readConfig(configPath)
|
|
710
|
-
const nodeProxyUrl = deriveNodeProxyUrl(lease)
|
|
711
|
-
|
|
712
|
-
// [HolySheep fork v2.1.34 / hs23] nodeProxyUrl===null means the control
|
|
713
|
-
// plane signalled this node has no process proxy (sub2api nodes).
|
|
714
|
-
// Skip node-proxy entirely and go straight to direct-https. This avoids
|
|
715
|
-
// the ECONNREFUSED → retry × MAX_PROXY_RETRIES delay that was burning
|
|
716
|
-
// ~5 s before falling back.
|
|
717
|
-
if (nodeProxyUrl === null) {
|
|
718
|
-
logProxyTiming('request.direct-noproxy', { sessionId, nodeId: lease.nodeId || '', attempt })
|
|
719
|
-
return forwardDirectHttps({ config, lease, clientReq, clientRes, trace: null })
|
|
720
|
-
}
|
|
721
|
-
|
|
722
|
-
if (isDirect) {
|
|
723
|
-
const crsBase = config.baseUrlAnthropic || 'https://api.holysheep.ai'
|
|
724
|
-
const target = new URL(clientReq.url, crsBase)
|
|
725
|
-
return forwardViaNodeProxy({
|
|
726
|
-
nodeProxyUrl,
|
|
727
|
-
targetUrl: target,
|
|
728
|
-
clientReq,
|
|
729
|
-
clientRes,
|
|
730
|
-
extraHeaders: buildAuthHeaders(config, lease),
|
|
731
|
-
trace: createForwardTrace({
|
|
732
|
-
clientReq,
|
|
733
|
-
targetUrl: target,
|
|
734
|
-
nodeProxyUrl,
|
|
735
|
-
sessionId,
|
|
736
|
-
lease,
|
|
737
|
-
attempt,
|
|
738
|
-
isDirect,
|
|
739
|
-
}),
|
|
740
|
-
})
|
|
741
|
-
}
|
|
742
|
-
|
|
743
|
-
const targetUrl = new URL(clientReq.url)
|
|
744
|
-
const headers = {
|
|
745
|
-
...buildAuthHeaders(config, lease),
|
|
746
|
-
host: targetUrl.host,
|
|
747
|
-
}
|
|
748
|
-
return forwardViaNodeProxy({
|
|
749
|
-
nodeProxyUrl,
|
|
750
|
-
targetUrl,
|
|
751
|
-
clientReq,
|
|
752
|
-
clientRes,
|
|
753
|
-
extraHeaders: headers,
|
|
754
|
-
trace: createForwardTrace({
|
|
755
|
-
clientReq,
|
|
756
|
-
targetUrl,
|
|
757
|
-
nodeProxyUrl,
|
|
758
|
-
sessionId,
|
|
759
|
-
lease,
|
|
760
|
-
attempt,
|
|
761
|
-
isDirect,
|
|
762
|
-
}),
|
|
763
|
-
})
|
|
764
|
-
}
|
|
765
|
-
|
|
766
|
-
let lastError = null
|
|
767
|
-
for (let attempt = 0; attempt <= MAX_PROXY_RETRIES; attempt++) {
|
|
768
|
-
try {
|
|
769
|
-
if (attempt === 0) {
|
|
770
|
-
await doForward(getCachedLease(sessionId), attempt)
|
|
771
|
-
} else {
|
|
772
|
-
const config = readConfig(configPath)
|
|
773
|
-
const prevLease = (() => { try { return leaseCache.get(sessionId) } catch { return null } })()
|
|
774
|
-
const forceReassign = shouldRefreshLeaseAfterError(lastError)
|
|
775
|
-
const retryReason = String(lastError?.body || lastError?.message || '').slice(0, 120)
|
|
776
|
-
// [HolySheep fork v2.1.36 / hs24] Emit structured breadcrumb so we
|
|
777
|
-
// can see whether the retry stayed on same node (sticky) or forced
|
|
778
|
-
// reassign. Previously invisible — made diagnosing node-bounce hard.
|
|
779
|
-
if (forceReassign) {
|
|
780
|
-
console.error(`[hs-claude-proxy] lease.force-reassign ${JSON.stringify({
|
|
781
|
-
sessionId, nodeId: prevLease?.nodeId || '', attempt, reason: retryReason,
|
|
782
|
-
})}`)
|
|
783
|
-
} else {
|
|
784
|
-
console.error(`[hs-claude-proxy] lease.sticky-hit ${JSON.stringify({
|
|
785
|
-
sessionId, nodeId: prevLease?.nodeId || '', attempt, retryReason,
|
|
786
|
-
})}`)
|
|
787
|
-
}
|
|
788
|
-
leaseCache.delete(sessionId)
|
|
789
|
-
if (forceReassign) {
|
|
790
|
-
await closeSession(configPath, sessionId)
|
|
791
|
-
}
|
|
792
|
-
const freshLease = await fetchFreshLease(config, sessionId, {
|
|
793
|
-
forceReassign,
|
|
794
|
-
})
|
|
795
|
-
await doForward(freshLease, attempt)
|
|
796
|
-
}
|
|
797
|
-
lastError = null
|
|
798
|
-
break
|
|
799
|
-
} catch (err) {
|
|
800
|
-
lastError = err
|
|
801
|
-
logProxyTiming('request.retry', {
|
|
802
|
-
sessionId,
|
|
803
|
-
attempt,
|
|
804
|
-
error: String(err?.message || err),
|
|
805
|
-
retryable: isRetryableNodeLeaseError(err),
|
|
806
|
-
forceReassign: shouldRefreshLeaseAfterError(err),
|
|
807
|
-
})
|
|
808
|
-
if (clientRes.headersSent) return
|
|
809
|
-
if (!isRetryableNodeLeaseError(err) && attempt > 0) break
|
|
810
|
-
}
|
|
811
|
-
}
|
|
812
|
-
// [HolySheep fork v2.1.27 / hs20] Direct HTTPS fallback.
|
|
813
|
-
// If EVERY node-proxy retry failed with a connection-level error
|
|
814
|
-
// (ECONNREFUSED, EHOSTUNREACH, ETIMEDOUT, or a generic "Proxy error"
|
|
815
|
-
// mentioning ECONNREFUSED in the body), fall back to a direct HTTPS
|
|
816
|
-
// POST against api.holysheep.ai. The relay on that side recognises
|
|
817
|
-
// bridge headers + cr_ keys and forwards to an account pool even when
|
|
818
|
-
// the per-device assigned node is dead. Keeps users productive while
|
|
819
|
-
// SRE re-balances nodes.
|
|
820
|
-
if (lastError && !clientRes.headersSent) {
|
|
821
|
-
const msg = String(lastError?.message || lastError?.body || '')
|
|
822
|
-
const isConnectionLevel = /ECONNREFUSED|EHOSTUNREACH|ETIMEDOUT|ENETUNREACH|connect/i.test(msg)
|
|
823
|
-
if (isConnectionLevel) {
|
|
824
|
-
try {
|
|
825
|
-
const config = readConfig(configPath)
|
|
826
|
-
const lease = getCachedLease(sessionId) || await fetchFreshLease(config, sessionId, {})
|
|
827
|
-
logProxyTiming('request.direct-fallback', {
|
|
828
|
-
sessionId,
|
|
829
|
-
originalError: msg.slice(0, 160),
|
|
830
|
-
})
|
|
831
|
-
await forwardDirectHttps({ config, lease, clientReq, clientRes, trace: null })
|
|
832
|
-
lastError = null
|
|
833
|
-
} catch (fallbackErr) {
|
|
834
|
-
lastError = fallbackErr
|
|
835
|
-
logProxyTiming('request.direct-fallback.fail', {
|
|
836
|
-
sessionId,
|
|
837
|
-
error: String(fallbackErr?.message || fallbackErr),
|
|
838
|
-
})
|
|
839
|
-
}
|
|
840
|
-
}
|
|
841
|
-
}
|
|
842
|
-
if (lastError && !clientRes.headersSent) {
|
|
843
|
-
const status = Number(lastError.statusCode || 502)
|
|
844
|
-
const body = String(lastError.body || lastError.message || 'Proxy error')
|
|
845
|
-
const isJson = body.trim().startsWith('{')
|
|
846
|
-
clientRes.writeHead(status, {
|
|
847
|
-
'content-type': isJson ? 'application/json; charset=utf-8' : 'text/plain; charset=utf-8'
|
|
848
|
-
})
|
|
849
|
-
clientRes.end(body)
|
|
850
|
-
}
|
|
851
|
-
})
|
|
852
|
-
|
|
853
|
-
// Hosts that must NOT be tunneled directly — they must go through CRS via
// the HTTP path (ANTHROPIC_BASE_URL), not a CONNECT tunnel that bypasses it.
// When the caller explicitly opts in (allowAnthropicConnect), nothing is blocked.
const BLOCKED_CONNECT = new Set(allowAnthropicConnect ? [] : ['api.anthropic.com'])
|
|
856
|
-
|
|
857
|
-
// CONNECT tunnel handler. Validates the target, blocks direct Anthropic
// tunnels, then tunnels bytes either straight to the target host (sub2api
// nodes without a process proxy) or through the node proxy, with the same
// lease-retry policy as the HTTP path.
server.on('connect', async (req, clientSocket, head) => {
  const target = String(req.url || '').trim()
  if (ENABLE_TIMING_LOG) {
    console.error(
      `[hs-claude-proxy] incoming.CONNECT ${JSON.stringify({
        target: target.slice(0, 120),
        ua: String(req.headers['user-agent'] || '').slice(0, 60),
        headers: Object.keys(req.headers).slice(0, 20),
      })}`
    )
  }
  const [host, rawPort] = target.split(':')
  const port = Number(rawPort || 443)
  if (!host || !Number.isInteger(port) || ![80, 443].includes(port)) {
    // FIX: end() instead of write()+destroy() — destroy() does not flush
    // pending data, so the 403 status line could be dropped before delivery.
    clientSocket.end('HTTP/1.1 403 Forbidden\r\n\r\n')
    return
  }

  if (BLOCKED_CONNECT.has(host)) {
    // Block direct CONNECT tunnels to Anthropic — traffic must go through
    // ANTHROPIC_BASE_URL so CRS can apply multi-account scheduling.
    clientSocket.end('HTTP/1.1 403 Forbidden\r\ncontent-type: text/plain; charset=utf-8\r\n\r\nDirect tunnel to api.anthropic.com is blocked; use ANTHROPIC_BASE_URL path')
    return
  }

  // One tunnel attempt against the node named by `lease`. Throws on failure
  // so the retry loop below can refresh the lease and try again.
  const doConnect = async (lease) => {
    const nodeProxyUrl = deriveNodeProxyUrl(lease)
    // [HolySheep fork v2.1.34 / hs23] No process proxy on this node (sub2api).
    // CONNECT tunnels go directly to the target host — no bridge headers needed.
    if (nodeProxyUrl === null) {
      logProxyTiming('connect.direct-noproxy', { sessionId, nodeId: lease.nodeId || '', target })
      const upstreamSocket = await new Promise((resolve, reject) => {
        const sock = net.connect(port, host, () => {
          // FIX: the 15 s timeout below is a *dial* timeout only. Previously it
          // stayed armed for the socket's entire life, so any established tunnel
          // idle for 15 s was destroyed mid-session. Disarm it on connect.
          sock.setTimeout(0)
          resolve(sock)
        })
        sock.once('error', reject)
        sock.setTimeout(15000, () => sock.destroy(new Error('CONNECT direct tunnel timeout')))
      })
      clientSocket.write('HTTP/1.1 200 Connection Established\r\n\r\n')
      if (head?.length) upstreamSocket.write(head)
      // pipeWithCleanup is assumed to attach error/close handlers on both
      // sockets and tear the pair down together — TODO confirm at definition.
      pipeWithCleanup(clientSocket, upstreamSocket)
      return
    }
    const upstreamSocket = await createConnectTunnel(
      nodeProxyUrl,
      target,
      buildAuthHeaders(readConfig(configPath), lease)
    )
    clientSocket.write('HTTP/1.1 200 Connection Established\r\n\r\n')
    if (head?.length) upstreamSocket.write(head)
    pipeWithCleanup(clientSocket, upstreamSocket)
  }

  // Retry loop: attempt 0 uses the cached lease; later attempts fetch a fresh
  // lease, optionally force-reassigning to a different node depending on the
  // previous error. Mirrors the HTTP request handler's policy.
  let lastError = null
  for (let attempt = 0; attempt <= MAX_PROXY_RETRIES; attempt++) {
    try {
      if (attempt === 0) {
        await doConnect(getCachedLease(sessionId))
      } else {
        const config = readConfig(configPath)
        const prevLease = (() => { try { return leaseCache.get(sessionId) } catch { return null } })()
        const forceReassign = shouldRefreshLeaseAfterError(lastError)
        const retryReason = String(lastError?.body || lastError?.message || '').slice(0, 120)
        // [HolySheep fork v2.1.36 / hs24] Same breadcrumb as HTTP retry loop.
        if (forceReassign) {
          console.error(`[hs-claude-proxy] lease.force-reassign ${JSON.stringify({
            sessionId, nodeId: prevLease?.nodeId || '', attempt, reason: retryReason, path: 'CONNECT',
          })}`)
        } else {
          console.error(`[hs-claude-proxy] lease.sticky-hit ${JSON.stringify({
            sessionId, nodeId: prevLease?.nodeId || '', attempt, retryReason, path: 'CONNECT',
          })}`)
        }
        leaseCache.delete(sessionId)
        if (forceReassign) {
          // Release the old lease so the control plane can hand out a new node.
          await closeSession(configPath, sessionId)
        }
        const freshLease = await fetchFreshLease(config, sessionId, {
          forceReassign,
        })
        await doConnect(freshLease)
      }
      lastError = null
      break
    } catch (err) {
      lastError = err
      // attempt 0 always gets one retry (the cached lease may simply be
      // stale); afterwards only retryable lease errors continue the loop.
      if (!isRetryableNodeLeaseError(err) && attempt > 0) break
    }
  }
  if (lastError) {
    const status = Number(lastError.statusCode || 502)
    const body = String(lastError.body || lastError.message || 'Proxy error')
    const statusText = status === 403 ? 'Forbidden' : status === 503 ? 'Service Unavailable' : 'Bad Gateway'
    const contentType = body.trim().startsWith('{')
      ? 'application/json; charset=utf-8'
      : 'text/plain; charset=utf-8'
    // FIX: end() flushes the error payload before closing; the previous
    // write()+destroy() pair could truncate the body on the wire.
    clientSocket.end(`HTTP/1.1 ${status} ${statusText}\r\ncontent-type: ${contentType}\r\n\r\n${body}`)
  }
})
|
|
955
|
-
|
|
956
|
-
return server
|
|
957
|
-
}
|
|
958
|
-
|
|
959
|
-
/**
 * Start the local process proxy on 127.0.0.1.
 *
 * Acquires a lease once up front, creates the proxy server, then listens on
 * the preferred port, transparently falling back to an OS-assigned ephemeral
 * port when the preferred one is already taken (EADDRINUSE).
 *
 * @param {object} [opts]
 * @param {number|null} [opts.port] - preferred listen port; defaults to the configured one
 * @param {string|null} [opts.sessionId] - reuse an existing session id, else a fresh UUID
 * @param {string} [opts.configPath]
 * @param {boolean} [opts.allowAnthropicConnect] - permit direct CONNECT tunnels to Anthropic
 * @returns {Promise<{server: import('http').Server, port: number, sessionId: string}>}
 */
async function startProcessProxy({ port = null, sessionId = null, configPath = CONFIG_PATH, allowAnthropicConnect = false } = {}) {
  const config = readConfig(configPath)
  const preferredPort = port || getProcessProxyPort(config)
  const effectiveSessionId = sessionId || crypto.randomUUID()

  // Grab a lease once at startup; after that the passive retry logic in the
  // request/CONNECT handlers keeps it current — no active renewal loop.
  await fetchFreshLease(config, effectiveSessionId)

  const server = createProcessProxyServer({ sessionId: effectiveSessionId, configPath, allowAnthropicConnect })

  return new Promise((resolve, reject) => {
    server.once('listening', () => {
      resolve({ server, port: server.address().port, sessionId: effectiveSessionId })
    })
    server.once('error', (err) => {
      if (err.code !== 'EADDRINUSE') {
        reject(err)
        return
      }
      // Preferred port is occupied — retry once on an ephemeral port.
      // Any error on this second attempt is fatal.
      server.once('error', reject)
      server.listen(0, '127.0.0.1')
    })
    server.listen(preferredPort, '127.0.0.1')
  })
}
|
|
987
|
-
|
|
988
|
-
/**
 * Release the node lease for `sessionId` on the control plane.
 *
 * Best-effort: never throws. Network/HTTP failures are folded into a status
 * string that is always logged, because silently leaked leases accumulate on
 * CRS and are otherwise invisible.
 *
 * @param {string} configPath - path to the CLI config file
 * @param {string|null|undefined} sessionId - session whose lease to release; no-op when falsy
 * @returns {Promise<void>}
 */
async function closeSession(configPath, sessionId) {
  if (!sessionId) return
  const config = readConfig(configPath)
  const controlPlaneUrl = getControlPlaneUrl(config)
  if (!controlPlaneUrl) {
    logProxyTiming('lease.close', { sessionId, skipped: 'no-control-plane' })
    return
  }
  const startedAt = Date.now()
  let status = 'unknown'
  try {
    const response = await fetch(`${controlPlaneUrl}/session/close`, {
      method: 'POST',
      headers: { 'content-type': 'application/json' },
      body: JSON.stringify({ sessionId }),
      // FIX: bound the call. closeSession is awaited on the force-reassign
      // retry path, so an unresponsive control plane previously hung the
      // caller's retry loop indefinitely. 5 s is generous for a close ping.
      signal: AbortSignal.timeout(5000),
    })
    status = response.ok ? 'ok' : `http_${response.status}`
  } catch (err) {
    // Timeout surfaces here as err.name === 'TimeoutError', logged like any
    // other network failure.
    status = `err_${err.name || 'unknown'}`
  }
  // Always log lease.close — unlike other events this isn't slow-path-gated
  // because lease leaks silently accumulate on CRS, making it critical to
  // observe every release attempt.
  console.error(
    `[hs-claude-proxy] lease.close ${JSON.stringify({
      sessionId,
      status,
      durationMs: Date.now() - startedAt,
    })}`
  )
}
|
|
1019
|
-
|
|
1020
|
-
// Public surface of this module: config helpers, session lifecycle, and the
// process-proxy entry point.
module.exports = {
  CONFIG_PATH,
  DEFAULT_PROXY_PORT,
  closeSession,
  getLocalProxyUrl,
  getProcessProxyPort,
  getControlPlaneUrl,
  readConfig,
  startProcessProxy,
  writeConfig,
}
|