@simonyea/holysheep-cli 1.6.13 → 1.6.15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -218,6 +218,7 @@ A: OpenClaw requires Node.js 20+; run `node --version` to confirm your version, then retry
 
 ## Changelog
 
+- **v1.6.14** — OpenClaw adds the `gpt-5.3-codex-spark` model, routed through the local bridge to HolySheep `/v1`
 - **v1.6.13** — Codex setup now writes `api_key` directly into config.toml instead of relying on environment variables, so on Windows the tools work right after setup without restarting the terminal; the tool list is also trimmed to just Claude Code / Codex / Droid / OpenClaw
 - **v1.6.12** — Fixes the OpenClaw Bridge streaming-response conversion for GPT-5.4 so `holysheep/gpt-5.4` no longer errors inside OpenClaw; also hardens Dashboard URL parsing to cut down on black/blank browser pages after install
 - **v1.6.11** — OpenClaw gains a local HolySheep Bridge that exposes a single `holysheep` provider for freely switching between GPT / Claude / MiniMax, and keeps the user's chosen default model instead of forcing GPT-5.4 as primary
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@simonyea/holysheep-cli",
-  "version": "1.6.13",
+  "version": "1.6.15",
   "description": "Claude Code/Cursor/Cline API relay for China — ¥1=$1, WeChat/Alipay payment, no credit card, no VPN. One command setup for all AI coding tools.",
   "keywords": [
     "openai-china",
@@ -112,11 +112,12 @@ async function setup(options) {
 
   // Step 1.5: choose which models to configure
   const MODEL_CHOICES = [
-    { name: 'gpt-5.4 (GPT 5.4, general-purpose coding)', value: 'gpt-5.4', checked: true },
-    { name: 'claude-sonnet-4-6 (Sonnet 4.6, balanced, recommended)', value: 'claude-sonnet-4-6', checked: true },
-    { name: 'claude-opus-4-6 (Opus 4.6, flagship power)', value: 'claude-opus-4-6', checked: true },
-    { name: 'MiniMax-M2.7-highspeed (high-speed budget option)', value: 'MiniMax-M2.7-highspeed', checked: true },
-    { name: 'claude-haiku-4-5 (Haiku 4.5, light and cheap)', value: 'claude-haiku-4-5', checked: true },
+    { name: 'gpt-5.4 (GPT 5.4, general-purpose coding)', value: 'gpt-5.4', checked: true },
+    { name: 'gpt-5.3-codex-spark (GPT 5.3 Codex Spark, coding)', value: 'gpt-5.3-codex-spark', checked: true },
+    { name: 'claude-sonnet-4-6 (Sonnet 4.6, balanced, recommended)', value: 'claude-sonnet-4-6', checked: true },
+    { name: 'claude-opus-4-6 (Opus 4.6, flagship power)', value: 'claude-opus-4-6', checked: true },
+    { name: 'MiniMax-M2.7-highspeed (high-speed budget option)', value: 'MiniMax-M2.7-highspeed', checked: true },
+    { name: 'claude-haiku-4-5 (Haiku 4.5, light and cheap)', value: 'claude-haiku-4-5', checked: true },
   ]
   const { selectedModels } = await inquirer.prompt([{
     type: 'checkbox',
@@ -59,6 +59,7 @@ function sendOpenAIStream(res, payload) {
   const choice = payload.choices?.[0] || {}
   const message = choice.message || {}
   const created = payload.created || Math.floor(Date.now() / 1000)
+  const messageContent = extractOpenAITextContent(message.content)
 
   res.writeHead(200, {
     'content-type': 'text/event-stream; charset=utf-8',
@@ -75,7 +76,7 @@ function sendOpenAIStream(res, payload) {
       index: 0,
       delta: {
         role: 'assistant',
-        ...(message.content ? { content: message.content } : {}),
+        ...(messageContent ? { content: messageContent } : {}),
         ...(message.tool_calls ? { tool_calls: message.tool_calls } : {}),
       },
       finish_reason: null,
@@ -101,11 +102,30 @@ function normalizeText(value) {
   if (Array.isArray(value)) return value.map(normalizeText).filter(Boolean).join('\n')
   if (value && typeof value === 'object') {
     if (typeof value.text === 'string') return value.text
+    if (typeof value.output_text === 'string') return value.output_text
     if (typeof value.content === 'string') return value.content
+    if (typeof value.value === 'string') return value.value
   }
   return value == null ? '' : String(value)
 }
 
+function extractOpenAITextContent(content) {
+  if (typeof content === 'string') return content
+  if (!Array.isArray(content)) return normalizeText(content)
+
+  return content
+    .map((part) => {
+      if (typeof part === 'string') return part
+      if (!part || typeof part !== 'object') return ''
+      if (part.type === 'text') return normalizeText(part.text)
+      if (part.type === 'output_text') return normalizeText(part.text)
+      if (part.type === 'input_text') return normalizeText(part.text)
+      return normalizeText(part.text || part.content || part.value)
+    })
+    .filter(Boolean)
+    .join('')
+}
+
 function parseDataUrl(url) {
   const match = String(url || '').match(/^data:([^;]+);base64,(.+)$/)
   if (!match) return null
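
The `extractOpenAITextContent` helper added above is what lets the bridge cope with responses whose `content` arrives as an array of typed parts (as newer OpenAI-style models such as `gpt-5.3-codex-spark` can return) rather than a plain string. A minimal sketch of its behaviour, assuming the implementation from this diff is in scope; the sample parts are illustrative, not taken from a real response:

```js
// Plain strings pass straight through.
extractOpenAITextContent('hello') // -> 'hello'

// Arrays of typed parts are flattened and concatenated;
// parts with no recoverable text are dropped by the .filter(Boolean).
extractOpenAITextContent([
  { type: 'output_text', text: 'Hello' },
  { type: 'text', text: ', ' },
  { type: 'input_text', text: 'world' },
  { type: 'refusal' },            // no text/content/value -> ''
])                                // -> 'Hello, world'
```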
@@ -314,10 +334,102 @@ function pickRoute(model) {
   return 'openai'
 }
 
-function parseOpenAIStreamText(text) {
+function responseOutputToText(output) {
+  return (output || [])
+    .flatMap((item) => {
+      if (item?.type === 'message') return item.content || []
+      if (item?.content) return item.content
+      return []
+    })
+    .filter((item) => item?.type === 'output_text' || item?.type === 'text')
+    .map((item) => extractOpenAITextContent(item.text || item.content || item))
+    .filter(Boolean)
+    .join('')
+}
+
+function responseOutputToToolCalls(output) {
+  return (output || [])
+    .filter((item) => item?.type === 'function_call' && item.name)
+    .map((item, index) => ({
+      id: item.call_id || item.id || `call_${index + 1}`,
+      type: 'function',
+      function: {
+        name: item.name,
+        arguments: typeof item.arguments === 'string'
+          ? item.arguments
+          : JSON.stringify(item.arguments || {}),
+      },
+    }))
+}
+
+function responseToChatCompletion(responseBody, requestedModel) {
+  const response = responseBody?.response && typeof responseBody.response === 'object'
+    ? responseBody.response
+    : responseBody
+
+  const text = responseOutputToText(response.output)
+  const toolCalls = responseOutputToToolCalls(response.output)
+  const status = String(response.status || '').toLowerCase()
+  const finishReason = status === 'completed' || status === '' ? 'stop' : 'length'
+  const outputTokens = response.usage?.output_tokens || response.usage?.completion_tokens || 0
+  const promptTokens = response.usage?.input_tokens || response.usage?.prompt_tokens || 0
+
+  return {
+    id: response.id || `chatcmpl_${Date.now()}`,
+    object: 'chat.completion',
+    created: response.created_at || Math.floor(Date.now() / 1000),
+    model: requestedModel || response.model,
+    choices: [{
+      index: 0,
+      message: {
+        role: 'assistant',
+        content: text || null,
+        ...(toolCalls.length ? { tool_calls: toolCalls } : {}),
+      },
+      finish_reason: finishReason,
+    }],
+    usage: response.usage
+      ? {
+          prompt_tokens: promptTokens,
+          completion_tokens: outputTokens,
+          total_tokens: response.usage.total_tokens || (promptTokens + outputTokens),
+        }
+      : undefined,
+  }
+}
+
+function normalizeOpenAICompatibleResponse(parsed, requestedModel) {
+  if (!parsed || typeof parsed !== 'object') return parsed
+
+  if (parsed.object === 'response' || Array.isArray(parsed.output)) {
+    return responseToChatCompletion(parsed, requestedModel)
+  }
+
+  if (parsed.object === 'chat.completion' || parsed.object === 'chat.completion.chunk') {
+    const choice = parsed.choices?.[0]
+    if (choice?.message) {
+      choice.message = {
+        ...choice.message,
+        content: extractOpenAITextContent(choice.message.content) || null,
+      }
+    }
+    if (choice?.delta?.content != null) {
+      choice.delta = {
+        ...choice.delta,
+        content: extractOpenAITextContent(choice.delta.content),
+      }
+    }
+  }
+
+  return parsed
+}
+
+function parseOpenAIStreamText(text, requestedModel) {
   try {
     const parsed = JSON.parse(String(text || ''))
-    if (parsed && typeof parsed === 'object') return parsed
+    if (parsed && typeof parsed === 'object') {
+      return normalizeOpenAICompatibleResponse(parsed, requestedModel)
+    }
   } catch {}
 
   const blocks = String(text || '').split(/\r?\n\r?\n+/).filter(Boolean)
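
The block of helpers above is the core of the change: when the upstream answers in the OpenAI Responses format (`object: 'response'` with an `output` array), `normalizeOpenAICompatibleResponse` rewrites it into the `chat.completion` shape the rest of the bridge already understands; plain `chat.completion` payloads only have their `content` flattened through `extractOpenAITextContent`. A rough sketch of that mapping, with an invented payload (all field values are illustrative):

```js
const responsesPayload = {
  id: 'resp_123',
  object: 'response',
  status: 'completed',
  model: 'gpt-5.3-codex-spark',
  output: [
    { type: 'message', content: [{ type: 'output_text', text: 'done' }] },
    { type: 'function_call', call_id: 'call_1', name: 'read_file', arguments: '{"path":"a.js"}' },
  ],
  usage: { input_tokens: 12, output_tokens: 3, total_tokens: 15 },
}

normalizeOpenAICompatibleResponse(responsesPayload, 'gpt-5.3-codex-spark')
// -> {
//      id: 'resp_123',
//      object: 'chat.completion',
//      model: 'gpt-5.3-codex-spark',
//      choices: [{
//        index: 0,
//        message: {
//          role: 'assistant',
//          content: 'done',
//          tool_calls: [{ id: 'call_1', type: 'function',
//                         function: { name: 'read_file', arguments: '{"path":"a.js"}' } }],
//        },
//        finish_reason: 'stop',
//      }],
//      usage: { prompt_tokens: 12, completion_tokens: 3, total_tokens: 15 },
//    }
```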
@@ -356,11 +468,7 @@ function parseOpenAIStreamText(text) {
     if (eventName === 'response.completed' && chunk.response) {
       responseCompleted = chunk.response
       if (!content) {
-        const outputText = (chunk.response.output || [])
-          .flatMap((item) => item?.content || [])
-          .filter((item) => item?.type === 'output_text' && typeof item.text === 'string')
-          .map((item) => item.text)
-          .join('')
+        const outputText = responseOutputToText(chunk.response.output)
         if (outputText) content = outputText
       }
       continue
@@ -369,28 +477,23 @@ function parseOpenAIStreamText(text) {
     finalChunk = chunk
     const choice = chunk.choices?.[0] || {}
     const delta = choice.delta || {}
-    if (delta.content) content += delta.content
-    else if (choice.message?.content) content += choice.message.content
+    const deltaContent = extractOpenAITextContent(delta.content)
+    const messageContent = extractOpenAITextContent(choice.message?.content)
+    if (deltaContent) content += deltaContent
+    else if (messageContent) content += messageContent
   }
 
   if (responseCompleted) {
-    return {
-      id: responseCompleted.id || `chatcmpl_${Date.now()}`,
-      object: 'chat.completion',
-      created: responseCompleted.created_at || Math.floor(Date.now() / 1000),
-      model: responseCompleted.model,
-      choices: [{
-        index: 0,
-        message: { role: 'assistant', content: content || null },
-        finish_reason: responseCompleted.status === 'completed' ? 'stop' : 'length',
-      }],
-      usage: responseCompleted.usage,
+    const completion = responseToChatCompletion(responseCompleted, requestedModel || responseCompleted.model)
+    if (!completion.choices?.[0]?.message?.content && content) {
+      completion.choices[0].message.content = content
     }
+    return completion
   }
 
   if (!finalChunk) return null
 
-  return {
+  return normalizeOpenAICompatibleResponse({
     id: finalChunk.id || `chatcmpl_${Date.now()}`,
     object: 'chat.completion',
     created: finalChunk.created || Math.floor(Date.now() / 1000),
@@ -401,7 +504,7 @@ function parseOpenAIStreamText(text) {
       finish_reason: finalChunk.choices?.[0]?.finish_reason || 'stop',
     }],
     usage: finalChunk.usage,
-  }
+  }, requestedModel)
 }
 
 async function relayOpenAIRequest(requestBody, config, res) {
@@ -420,7 +523,7 @@ async function relayOpenAIRequest(requestBody, config, res) {
   })
 
   const text = await upstream.text()
-  const parsed = parseOpenAIStreamText(text)
+  const parsed = parseOpenAIStreamText(text, requestBody.model)
   if (upstream.ok && parsed) {
     if (requestBody.stream) return sendOpenAIStream(res, parsed)
     return sendJson(res, upstream.status, parsed)
@@ -18,6 +18,7 @@ const DEFAULT_BRIDGE_PORT = 18788
 const DEFAULT_GATEWAY_PORT = 18789
 const MAX_PORT_SCAN = 40
 const OPENCLAW_DEFAULT_MODEL = 'gpt-5.4'
+const OPENCLAW_DEFAULT_CODEX_SPARK_MODEL = 'gpt-5.3-codex-spark'
 const OPENCLAW_DEFAULT_CLAUDE_MODEL = 'claude-sonnet-4-6'
 const OPENCLAW_DEFAULT_MINIMAX_MODEL = 'MiniMax-M2.7-highspeed'
 const OPENCLAW_PROVIDER_NAME = 'holysheep'
@@ -312,7 +313,7 @@ function buildModelEntry(id) {
 function normalizeRequestedModels(selectedModels) {
   const requestedModels = Array.isArray(selectedModels) && selectedModels.length > 0
     ? [...selectedModels]
-    : [OPENCLAW_DEFAULT_MODEL, OPENCLAW_DEFAULT_CLAUDE_MODEL, OPENCLAW_DEFAULT_MINIMAX_MODEL]
+    : [OPENCLAW_DEFAULT_MODEL, OPENCLAW_DEFAULT_CODEX_SPARK_MODEL, OPENCLAW_DEFAULT_CLAUDE_MODEL, OPENCLAW_DEFAULT_MINIMAX_MODEL]
 
   if (!requestedModels.includes(OPENCLAW_DEFAULT_MODEL)) requestedModels.unshift(OPENCLAW_DEFAULT_MODEL)
   return Array.from(new Set(requestedModels))
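
Net effect of the last two hunks: when the user makes no explicit selection, the OpenClaw bridge now seeds the codex-spark model into the default list alongside the existing models, while `gpt-5.4` is still forced to the front and duplicates are removed. A quick sketch, assuming the constants defined earlier in this diff:

```js
// No selection -> the new four-model default set, gpt-5.4 first.
normalizeRequestedModels([])
// -> ['gpt-5.4', 'gpt-5.3-codex-spark', 'claude-sonnet-4-6', 'MiniMax-M2.7-highspeed']

// An explicit selection is honoured, but gpt-5.4 is prepended when missing
// and duplicates are collapsed by the Set.
normalizeRequestedModels(['claude-opus-4-6', 'gpt-5.3-codex-spark', 'claude-opus-4-6'])
// -> ['gpt-5.4', 'claude-opus-4-6', 'gpt-5.3-codex-spark']
```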