wechat-to-anything 0.6.13 → 0.6.15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.en.md CHANGED
@@ -34,7 +34,7 @@
34
34
  ## Features
35
35
 
36
36
  - 🔌 **Zero-config setup** — One `npx` command, no cloning, no configuration
37
- - 🧠 **Agent-agnostic** — Works with any OpenAI-compatible API (Codex / Gemini / Claude / custom)
37
+ - 🧠 **Agent-agnostic** — Works with any OpenAI-compatible API (Codex / Gemini / Claude / OpenCode / custom)
38
38
  - 📡 **Full multimodal** — Text, images, voice, video, files — bidirectional
39
39
  - 🤖 **Multi-Agent** — Connect multiple Agents simultaneously, route with `@` prefix
40
40
  - ⌨️ **Typing indicator** — Shows "typing..." while Agent is thinking
@@ -50,6 +50,18 @@
50
50
  | 🎬 Video | ✅ Auto-receive | ✅ With thumbnail |
51
51
  | 📄 File | ✅ Content extraction | ✅ Downloadable |
52
52
 
53
+ ### Supported Agents / Tools
54
+
55
+ | Agent | Integration | Install |
56
+ |-------|------------|---------|
57
+ | ⌬ [OpenCode](https://opencode.ai) | `examples/opencode/` template | `npm i -g opencode-ai` |
58
+ | 🤖 [OpenAI Codex](https://github.com/openai/codex) | `--codex` | `npm i -g @openai/codex` |
59
+ | 💎 [Google Gemini](https://github.com/google-gemini/gemini-cli) | `--gemini` | `npm i -g @google/gemini-cli` |
60
+ | 🧬 [Claude Code](https://github.com/anthropics/claude-code) | `--claude` | `npm i -g @anthropic-ai/claude-code` |
61
+ | 🐾 [OpenClaw](https://github.com/nicepkg/openclaw) | `--openclaw` | `npm i -g openclaw` |
62
+ | 🔗 Any OpenAI-compatible API | Pass URL directly | — |
63
+ | 📡 [ACP](https://agentcommunicationprotocol.dev/) Agent | `--agent name=acp://...` | — |
64
+
53
65
  ## Quick Start
54
66
 
55
67
  ```bash
@@ -59,6 +71,9 @@ npx wechat-to-anything --gemini # Google Gemini
59
71
  npx wechat-to-anything --claude # Claude Code
60
72
  npx wechat-to-anything --openclaw # OpenClaw
61
73
 
74
+ # Or use example templates for more Agents:
75
+ cd examples/opencode && node server.mjs # OpenCode (free models included)
76
+
62
77
  # Or pass a URL directly:
63
78
  npx wechat-to-anything http://your-agent:8000/v1
64
79
  ```
@@ -132,6 +147,8 @@ Include specific formats in Agent responses to automatically send media:
132
147
  ```
133
148
 
134
149
  > Examples: [image-test.mjs](examples/image-test.mjs) · [voice-test.mjs](examples/voice-test.mjs) · [video-test-local.mjs](examples/video-test-local.mjs) · [file-test.mjs](examples/file-test.mjs)
150
+ >
151
+ > Agent templates: [claude-code](examples/claude-code/) · [opencode](examples/opencode/) · [openai](examples/openai/)
135
152
 
136
153
  ## Multi-Agent Mode
137
154
 
package/README.md CHANGED
@@ -34,7 +34,7 @@
34
34
  ## 特性
35
35
 
36
36
  - 🔌 **零依赖接入** — `npx` 一条命令,无需 clone、无需配置
37
- - 🧠 **Agent 无关** — 支持任何 OpenAI 兼容 API(Codex / Gemini / Claude / 自建)
37
+ - 🧠 **Agent 无关** — 支持任何 OpenAI 兼容 API(Codex / Gemini / Claude / OpenCode / 自建)
38
38
  - 📡 **全模态** — 文本、图片、语音、视频、文件,双向全覆盖
39
39
  - 🤖 **多 Agent** — 同时接入多个 Agent,`@` 路由切换
40
40
  - ⌨️ **打字指示器** — Agent 思考时显示"对方正在输入"
@@ -50,6 +50,18 @@
50
50
  | 🎬 视频 | ✅ 自动接收 | ✅ 带缩略图 |
51
51
  | 📄 文件 | ✅ 提取内容 | ✅ 可下载 |
52
52
 
53
+ ### 已支持的 Agent / 工具
54
+
55
+ | Agent | 接入方式 | 安装 |
56
+ |-------|---------|------|
57
+ | ⌬ [OpenCode](https://opencode.ai) | `examples/opencode/` 模板 | `npm i -g opencode-ai` |
58
+ | 🤖 [OpenAI Codex](https://github.com/openai/codex) | `--codex` | `npm i -g @openai/codex` |
59
+ | 💎 [Google Gemini](https://github.com/google-gemini/gemini-cli) | `--gemini` | `npm i -g @google/gemini-cli` |
60
+ | 🧬 [Claude Code](https://github.com/anthropics/claude-code) | `--claude` | `npm i -g @anthropic-ai/claude-code` |
61
+ | 🐾 [OpenClaw](https://github.com/nicepkg/openclaw) | `--openclaw` | `npm i -g openclaw` |
62
+ | 🔗 任何 OpenAI 兼容 API | 直接传 URL | — |
63
+ | 📡 [ACP 协议](https://agentcommunicationprotocol.dev/) Agent | `--agent name=acp://...` | — |
64
+
53
65
  ## 快速开始
54
66
 
55
67
  ```bash
@@ -59,6 +71,9 @@ npx wechat-to-anything --gemini # Google Gemini
59
71
  npx wechat-to-anything --claude # Claude Code
60
72
  npx wechat-to-anything --openclaw # OpenClaw
61
73
 
74
+ # 或用 examples 模板接入更多 Agent:
75
+ cd examples/opencode && node server.mjs # OpenCode(含免费模型)
76
+
62
77
  # 或直接传 URL:
63
78
  npx wechat-to-anything http://your-agent:8000/v1
64
79
  ```
@@ -132,6 +147,8 @@ Agent 回复中包含特定格式即可自动发送多媒体:
132
147
  ```
133
148
 
134
149
  > 示例:[image-test.mjs](examples/image-test.mjs) · [voice-test.mjs](examples/voice-test.mjs) · [video-test-local.mjs](examples/video-test-local.mjs) · [file-test.mjs](examples/file-test.mjs)
150
+ >
151
+ > Agent 模板:[claude-code](examples/claude-code/) · [opencode](examples/opencode/) · [openai](examples/openai/)
135
152
 
136
153
  ## 多 Agent 模式
137
154
 
package/cli/bridge.mjs CHANGED
@@ -307,8 +307,8 @@ export async function start(agents, defaultAgent, { port = 9099 } = {}) {
307
307
  // 构建发给 Agent 的消息
308
308
  let agentMessages;
309
309
 
310
- if (media?.type === "image") {
311
- // 图片:下载解密,缓存 base64,等待用户发文字
310
+ if (media?.type === "image" && !media.fromRef) {
311
+ // 直接发送的图片:下载解密,缓存 base64,等待用户发文字
312
312
  console.log(pc.cyan(`← [微信] ${from}: [图片] (等待文字问题...)`));
313
313
  try {
314
314
  const buf = await downloadAndDecrypt(media.encryptQueryParam, media.aesKey);
@@ -322,6 +322,23 @@ export async function start(agents, defaultAgent, { port = 9099 } = {}) {
322
322
  }
323
323
  continue; // 不发给 Agent,等文字
324
324
 
325
+ } else if (media?.type === "image" && media.fromRef && text) {
326
+ // 引用图片 + 文字:立即合并发送(不缓存)
327
+ console.log(pc.cyan(`← [微信] ${from}: [引用图片+文字] ${text.slice(0, 80)}`));
328
+ try {
329
+ const buf = await downloadAndDecrypt(media.encryptQueryParam, media.aesKey);
330
+ agentMessages = [{
331
+ role: "user",
332
+ content: [
333
+ { type: "text", text },
334
+ { type: "image_url", image_url: { url: `data:image/jpeg;base64,${buf.toString("base64")}` } },
335
+ ],
336
+ }];
337
+ } catch (err) {
338
+ console.error(pc.red(` 引用图片下载失败: ${err.message}`));
339
+ agentMessages = [{ role: "user", content: text }];
340
+ }
341
+
325
342
  } else if (text) {
326
343
  // === 管理命令 ===
327
344
  if (multiMode && text.trim() === "@list") {
@@ -376,26 +393,32 @@ export async function start(agents, defaultAgent, { port = 9099 } = {}) {
376
393
  continue;
377
394
  }
378
395
  } else if (media?.type === "file") {
379
- console.log(pc.cyan(`← [微信] ${from}: [文件] ${media.fileName}`));
396
+ const label = media.fromRef ? "引用文件" : "文件";
397
+ console.log(pc.cyan(`← [微信] ${from}: [${label}] ${media.fileName}`));
380
398
  try {
381
399
  const { buffer } = await downloadMediaToFile(media.encryptQueryParam, media.aesKey, media.fileName.split(".").pop());
382
400
  if (buffer.length < 100_000) {
383
401
  const content = buffer.toString("utf-8");
384
402
  if (!content.includes("\x00")) {
385
- agentMessages = [{ role: "user", content: `用户发送了文件 "${media.fileName}",内容如下:\n\n${content}` }];
403
+ const prefix = (media.fromRef && text) ? `${text}\n\n` : "";
404
+ agentMessages = [{ role: "user", content: `${prefix}用户发送了文件 "${media.fileName}",内容如下:\n\n${content}` }];
386
405
  } else {
387
- agentMessages = [{ role: "user", content: `用户发送了文件 "${media.fileName}"(${(buffer.length / 1024).toFixed(1)} KB,二进制文件)` }];
406
+ const prefix = (media.fromRef && text) ? `${text}\n\n` : "";
407
+ agentMessages = [{ role: "user", content: `${prefix}用户发送了文件 "${media.fileName}"(${(buffer.length / 1024).toFixed(1)} KB,二进制文件)` }];
388
408
  }
389
409
  } else {
390
- agentMessages = [{ role: "user", content: `用户发送了文件 "${media.fileName}"(${(buffer.length / 1024).toFixed(1)} KB)` }];
410
+ const prefix = (media.fromRef && text) ? `${text}\n\n` : "";
411
+ agentMessages = [{ role: "user", content: `${prefix}用户发送了文件 "${media.fileName}"(${(buffer.length / 1024).toFixed(1)} KB)` }];
391
412
  }
392
413
  } catch (err) {
393
414
  console.error(pc.red(` 文件下载失败: ${err.message}`));
394
415
  continue;
395
416
  }
396
417
  } else if (media?.type === "video") {
397
- console.log(pc.cyan(`← [微信] ${from}: [视频]`));
398
- agentMessages = [{ role: "user", content: "用户发送了一段视频" }];
418
+ const label = media.fromRef ? "引用视频" : "视频";
419
+ console.log(pc.cyan(`← [微信] ${from}: [${label}]`));
420
+ const prefix = (media.fromRef && text) ? `${text}\n\n` : "";
421
+ agentMessages = [{ role: "user", content: `${prefix}用户发送了一段视频` }];
399
422
  } else {
400
423
  continue;
401
424
  }
package/cli/weixin.mjs CHANGED
@@ -391,24 +391,77 @@ export async function sendTyping(token, userId, typingTicket, status = 1) {
391
391
  }
392
392
 
393
393
  /**
394
- * 提取消息文本(支持语音转文字)
394
+ * 提取消息文本(支持语音转文字 + 引用消息)
395
+ *
396
+ * 引用消息结构 (ref_msg):
397
+ * item.ref_msg.title — 引用摘要
398
+ * item.ref_msg.message_item — 引用的原始 item(可能是文字/图片/文件等)
399
+ *
400
+ * 参考: openclaw-weixin inbound.ts#L86-98
395
401
  */
396
402
  export function extractText(msg) {
397
403
  const items = msg.item_list || [];
398
404
  for (const item of items) {
399
- if (item.type === 1 && item.text_item?.text) return item.text_item.text;
405
+ if (item.type === 1 && item.text_item?.text) {
406
+ const text = item.text_item.text;
407
+ const ref = item.ref_msg;
408
+ if (!ref) return text;
409
+
410
+ // 引用的是媒体(图片/文件/视频) → 只返回用户文字,媒体由 extractMedia 处理
411
+ if (ref.message_item && isMediaItem(ref.message_item)) return text;
412
+
413
+ // 引用的是文字 → 拼接引用上下文
414
+ const parts = [];
415
+ if (ref.title) parts.push(ref.title);
416
+ if (ref.message_item?.text_item?.text) parts.push(ref.message_item.text_item.text);
417
+ if (!parts.length) return text;
418
+ return `[引用: ${parts.join(" | ")}]\n${text}`;
419
+ }
400
420
  // 语音转文字(微信自带)
401
421
  if (item.type === 3 && item.voice_item?.text) return item.voice_item.text;
402
422
  }
403
423
  return "";
404
424
  }
405
425
 
426
+ /** 判断 item 是否为媒体类型 (image=2, voice=3, file=4, video=5) */
427
+ function isMediaItem(item) {
428
+ return item.type === 2 || item.type === 3 || item.type === 4 || item.type === 5;
429
+ }
430
+
406
431
  /**
407
- * 提取多媒体信息
408
- * @returns {{ type: 'image'|'voice'|'file'|'video', encryptQueryParam, aesKey, fileName?, voiceText? } | null}
432
+ * 提取多媒体信息(含引用消息中的媒体)
433
+ *
434
+ * 优先级:主消息中的媒体 > 引用消息中的媒体
435
+ * 参考: openclaw-weixin process-message.ts#L112-138
436
+ *
437
+ * @returns {{ type: 'image'|'voice'|'file'|'video', encryptQueryParam, aesKey, fileName?, voiceText?, fromRef? } | null}
409
438
  */
410
439
  export function extractMedia(msg) {
411
440
  const items = msg.item_list || [];
441
+
442
+ // 1. 先扫描主消息中的媒体
443
+ const mainMedia = extractMediaFromItem(items);
444
+ if (mainMedia) return mainMedia;
445
+
446
+ // 2. 无主媒体时,检查引用消息中的媒体 (ref_msg)
447
+ for (const item of items) {
448
+ if (item.type === 1 && item.ref_msg?.message_item && isMediaItem(item.ref_msg.message_item)) {
449
+ const refMedia = extractMediaFromItem([item.ref_msg.message_item]);
450
+ if (refMedia) {
451
+ refMedia.fromRef = true; // 标记来自引用
452
+ return refMedia;
453
+ }
454
+ }
455
+ }
456
+
457
+ return null;
458
+ }
459
+
460
+ /**
461
+ * 从 item 列表中提取第一个媒体
462
+ * @private
463
+ */
464
+ function extractMediaFromItem(items) {
412
465
  for (const item of items) {
413
466
  // 图片
414
467
  if (item.type === 2 && item.image_item?.media?.encrypt_query_param) {
@@ -434,7 +487,6 @@ export function extractMedia(msg) {
434
487
  }
435
488
  // 文件
436
489
  if (item.type === 4 && item.file_item?.media?.encrypt_query_param) {
437
-
438
490
  return {
439
491
  type: "file",
440
492
  encryptQueryParam: item.file_item.media.encrypt_query_param,
@@ -444,7 +496,6 @@ export function extractMedia(msg) {
444
496
  }
445
497
  // 视频
446
498
  if (item.type === 5 && item.video_item?.media?.encrypt_query_param) {
447
-
448
499
  return {
449
500
  type: "video",
450
501
  encryptQueryParam: item.video_item.media.encrypt_query_param,
@@ -0,0 +1,24 @@
1
+ # OpenCode + wechat-to-anything
2
+
3
+ ## 前置条件
4
+
5
+ ```bash
6
+ # 安装 OpenCode (任选一种)
7
+ brew install anomalyco/tap/opencode
8
+ # 或
9
+ npm i -g opencode-ai
10
+ ```
11
+
12
+ 然后配置 AI provider,参考 [opencode.ai/docs](https://opencode.ai/docs)。
13
+
14
+ ## 启动
15
+
16
+ ```bash
17
+ # 终端 1: 启动 OpenCode HTTP 服务
18
+ node server.mjs
19
+
20
+ # 终端 2: 启动微信桥
21
+ npx wechat-to-anything http://localhost:3000/v1
22
+ ```
23
+
24
+ 完成!微信消息会自动转发给 OpenCode,回复会发回微信。
@@ -0,0 +1,107 @@
1
+ /**
2
+ * OpenCode → OpenAI 兼容 HTTP 服务
3
+ *
4
+ * 通过 `opencode run "prompt"` 子进程调用 OpenCode,
5
+ * 包装成标准 HTTP 接口供 wechat-to-anything 连接。
6
+ *
7
+ * 前置条件:
8
+ * npm i -g opencode-ai (或 brew install anomalyco/tap/opencode)
9
+ * 配置好 AI provider: opencode providers login
10
+ *
11
+ * 用法:
12
+ * node server.mjs
13
+ * # 然后另一个终端:
14
+ * npx wechat-to-anything http://localhost:3000/v1
15
+ *
16
+ * 可选环境变量:
17
+ * PORT=3000 HTTP 端口
18
+ * OPENCODE_MODEL=xxx 指定模型 (格式: provider/model)
19
+ */
20
+
21
+ import { createServer } from "node:http";
22
+ import { spawn } from "node:child_process";
23
+
24
+ const PORT = process.env.PORT || 3000;
25
+ const MODEL = process.env.OPENCODE_MODEL || "";
26
+
27
+ const server = createServer(async (req, res) => {
28
+ if (req.method !== "POST" || !req.url.startsWith("/v1/chat/completions")) {
29
+ res.writeHead(404);
30
+ res.end("Not Found");
31
+ return;
32
+ }
33
+
34
+ const body = await readBody(req);
35
+ const { messages } = JSON.parse(body);
36
+ const userMessage =
37
+ messages?.findLast((m) => m.role === "user")?.content || "";
38
+
39
+ try {
40
+ const result = await runOpenCode(userMessage);
41
+
42
+ res.writeHead(200, { "Content-Type": "application/json" });
43
+ res.end(
44
+ JSON.stringify({
45
+ choices: [
46
+ {
47
+ message: { role: "assistant", content: result },
48
+ },
49
+ ],
50
+ })
51
+ );
52
+ } catch (err) {
53
+ console.error(` ✗ ${err.message.slice(0, 120)}`);
54
+ res.writeHead(500, { "Content-Type": "application/json" });
55
+ res.end(JSON.stringify({ error: err.message }));
56
+ }
57
+ });
58
+
59
+ server.listen(PORT, () => {
60
+ console.log(`⌬ OpenCode Agent 运行在 http://localhost:${PORT}/v1`);
61
+ if (MODEL) console.log(` 模型: ${MODEL}`);
62
+ console.log(
63
+ ` 然后运行: npx wechat-to-anything http://localhost:${PORT}/v1`
64
+ );
65
+ });
66
+
67
+ /**
68
+ * 通过 opencode run "prompt" 非交互模式调用
69
+ */
70
+ function runOpenCode(prompt) {
71
+ return new Promise((resolve, reject) => {
72
+ const args = ["run", prompt];
73
+ if (MODEL) args.push("-m", MODEL);
74
+
75
+ const child = spawn("opencode", args, {
76
+ stdio: ["ignore", "pipe", "pipe"],
77
+ timeout: 300_000,
78
+ });
79
+
80
+ let stdout = "";
81
+ let stderr = "";
82
+
83
+ child.stdout.on("data", (d) => (stdout += d));
84
+ child.stderr.on("data", (d) => (stderr += d));
85
+
86
+ child.on("close", (code) => {
87
+ // opencode run 输出可能包含 ANSI 颜色码,清理掉
88
+ const clean = (s) => s.replace(/\x1b\[[0-9;]*m/g, "").trim();
89
+ if (code !== 0) {
90
+ const errMsg = clean(stderr + stdout) || `exit code ${code}`;
91
+ reject(new Error(errMsg.slice(0, 300)));
92
+ } else {
93
+ resolve(clean(stdout) || "(empty response)");
94
+ }
95
+ });
96
+
97
+ child.on("error", (err) => reject(err));
98
+ });
99
+ }
100
+
101
+ function readBody(req) {
102
+ return new Promise((resolve) => {
103
+ let data = "";
104
+ req.on("data", (chunk) => (data += chunk));
105
+ req.on("end", () => resolve(data));
106
+ });
107
+ }
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "wechat-to-anything",
3
- "version": "0.6.13",
3
+ "version": "0.6.15",
4
4
  "description": "一条命令,把微信变成任何 AI Agent 的入口",
5
5
  "type": "module",
6
6
  "bin": {
@@ -20,7 +20,8 @@
20
20
  "claude-code",
21
21
  "openai",
22
22
  "codex",
23
- "anthropic"
23
+ "anthropic",
24
+ "opencode"
24
25
  ],
25
26
  "author": "kellyvv",
26
27
  "license": "MIT",