@simonyea/holysheep-cli 1.7.20 → 1.7.21
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +1 -1
- package/src/tools/openclaw-bridge.js +16 -3
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@simonyea/holysheep-cli",
|
|
3
|
-
"version": "1.7.20",
|
|
3
|
+
"version": "1.7.21",
|
|
4
4
|
"description": "Claude Code/Cursor/Cline API relay for China — ¥1=$1, WeChat/Alipay payment, no credit card, no VPN. One command setup for all AI coding tools.",
|
|
5
5
|
"keywords": [
|
|
6
6
|
"openai-china",
|
|
@@ -6,6 +6,19 @@ const http = require('http')
|
|
|
6
6
|
const path = require('path')
|
|
7
7
|
const os = require('os')
|
|
8
8
|
const fetch = global.fetch || require('node-fetch')
|
|
9
|
+
const _nodeFetch = require('node-fetch')
|
|
10
|
+
|
|
11
|
+
// Windows 上 api.holysheep.ai 有 IPv6 DNS 记录,但 Windows Server 普遍未启用 IPv6。
|
|
12
|
+
// Node.js 默认 IPv6 优先,会导致每次请求先卡在 IPv6 连接超时再降级 IPv4,
|
|
13
|
+
// 超过 OpenClaw embedded agent 的 timeout 阈值,触发 "LLM request timed out"。
|
|
14
|
+
// 解决方案:Windows 下强制用 node-fetch + https.Agent({family:4}) 只走 IPv4。
|
|
15
|
+
function upstreamFetch(url, options) {
|
|
16
|
+
if (process.platform === 'win32' && String(url).startsWith('https://')) {
|
|
17
|
+
const https = require('https')
|
|
18
|
+
return _nodeFetch(url, { ...options, agent: new https.Agent({ family: 4 }) })
|
|
19
|
+
}
|
|
20
|
+
return fetch(url, options)
|
|
21
|
+
}
|
|
9
22
|
|
|
10
23
|
const OPENCLAW_DIR = path.join(os.homedir(), '.openclaw')
|
|
11
24
|
const BRIDGE_CONFIG_FILE = path.join(OPENCLAW_DIR, 'holysheep-bridge.json')
|
|
@@ -516,7 +529,7 @@ async function relayOpenAIRequest(requestBody, config, res) {
|
|
|
516
529
|
...requestBody,
|
|
517
530
|
stream: requestBody.stream === true,
|
|
518
531
|
}
|
|
519
|
-
const upstream = await fetch(`${config.baseUrlOpenAI.replace(/\/+$/, '')}/chat/completions`, {
|
|
532
|
+
const upstream = await upstreamFetch(`${config.baseUrlOpenAI.replace(/\/+$/, '')}/chat/completions`, {
|
|
520
533
|
method: 'POST',
|
|
521
534
|
headers: {
|
|
522
535
|
'content-type': 'application/json',
|
|
@@ -584,7 +597,7 @@ async function relayAnthropicStream(requestBody, config, route, res) {
|
|
|
584
597
|
|
|
585
598
|
let upstream
|
|
586
599
|
try {
|
|
587
|
-
upstream = await fetch(baseUrl, {
|
|
600
|
+
upstream = await upstreamFetch(baseUrl, {
|
|
588
601
|
method: 'POST',
|
|
589
602
|
headers: {
|
|
590
603
|
'content-type': 'application/json',
|
|
@@ -707,7 +720,7 @@ async function relayAnthropicRequest(requestBody, config, route, res) {
|
|
|
707
720
|
? `${config.baseUrlAnthropic.replace(/\/+$/, '')}/minimax/v1/messages`
|
|
708
721
|
: `${config.baseUrlAnthropic.replace(/\/+$/, '')}/v1/messages`
|
|
709
722
|
|
|
710
|
-
const upstream = await fetch(baseUrl, {
|
|
723
|
+
const upstream = await upstreamFetch(baseUrl, {
|
|
711
724
|
method: 'POST',
|
|
712
725
|
headers: {
|
|
713
726
|
'content-type': 'application/json',
|