nonebot-plugin-dotcharacter 2.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,241 @@
1
+ """角色加载器 — 扫描 dot-skill / colleague-skill 输出目录,解析 SKILL.md 角色文件。
2
+
3
+ 支持从多个目录加载角色(如本地 dot-skill + 社区 colleague-skill 仓库)。
4
+ 同名 slug 以先扫描到的为准。
5
+ """
6
+
7
+ import json
8
+ import re
9
+ from dataclasses import dataclass, field
10
+ from pathlib import Path
11
+ from typing import Dict, List, Optional
12
+
13
+ import yaml
14
+
15
+
16
@dataclass
class CharacterMeta:
    """Metadata for one loaded character (parsed from a SKILL.md directory)."""
    slug: str                  # directory name, used as the unique lookup key
    family: str                # one of: colleague / relationship / celebrity
    display_name: str
    description: str
    language: str = "zh-CN"
    tags: List[str] = field(default_factory=list)
    persona_prompt: str = ""   # persona.md content, or PART B of SKILL.md
    work_prompt: str = ""      # PART A (work skill) section of SKILL.md
    combined_prompt: str = ""  # full system prompt assembled for the LLM
    source_files: List[str] = field(default_factory=list)  # SKILL.md path(s)
    source_root: str = ""      # skills root this character was loaded from
30
+
31
+
32
+ def _parse_frontmatter(content: str) -> tuple[dict, str]:
33
+ fm_match = re.match(r"^---\s*\n(.*?)\n---\s*\n", content, re.DOTALL)
34
+ if fm_match:
35
+ try:
36
+ meta = yaml.safe_load(fm_match.group(1)) or {}
37
+ except yaml.YAMLError:
38
+ meta = {}
39
+ body = content[fm_match.end():]
40
+ return meta, body
41
+ return {}, content
42
+
43
+
44
+ def _load_json(path: Path) -> dict:
45
+ try:
46
+ return json.loads(path.read_text(encoding="utf-8"))
47
+ except (json.JSONDecodeError, OSError):
48
+ return {}
49
+
50
+
51
+ def _extract_part_b(content: str) -> str:
52
+ match = re.search(
53
+ r"## PART B[::].*?\n(.*?)(?=\n## PART |\Z)",
54
+ content, re.DOTALL,
55
+ )
56
+ if match:
57
+ return match.group(0).strip()
58
+ match = re.search(
59
+ r"# Persona\b.*?(?=\n## PART |\n# Work|\Z)",
60
+ content, re.DOTALL,
61
+ )
62
+ if match:
63
+ return match.group(0).strip()
64
+ return ""
65
+
66
+
67
+ def _extract_part_a(content: str) -> str:
68
+ match = re.search(
69
+ r"## PART A[::].*?\n(.*?)(?=\n## PART B|\Z)",
70
+ content, re.DOTALL,
71
+ )
72
+ if match:
73
+ return match.group(0).strip()
74
+ match = re.search(
75
+ r"# Work Skill\b.*?(?=\n## PART B|\n# Persona|\Z)",
76
+ content, re.DOTALL,
77
+ )
78
+ if match:
79
+ return match.group(0).strip()
80
+ return ""
81
+
82
+
83
def _build_system_prompt(
    meta: dict, body: str, persona_file_content: str, characters_dir: Path,
) -> str:
    """Assemble the full role-play system prompt from parsed SKILL.md data.

    Sections, joined with "---" dividers:
    1. role framing + hard chat rules, 2. persona (persona.md or PART B),
    3. optional work skill (PART A), 4. runtime priority rules.
    """
    display_name = meta.get("display_name", meta.get("name", "角色"))
    description = meta.get("description", meta.get("summary", ""))
    slug = meta.get("slug", "")

    # Fallback to meta.json when the front matter carried no description.
    # NOTE(review): _scan_one_root passes the skills ROOT as characters_dir,
    # and the on-disk layout is <root>/<family>/<slug>/meta.json — this path
    # omits the <family> segment, so it likely never exists; confirm intent.
    if slug and not description:
        meta_json_path = characters_dir / slug / "meta.json"
        if meta_json_path.exists():
            meta_data = _load_json(meta_json_path)
            display_name = meta_data.get("display_name", display_name)
            description = meta_data.get("summary", description)

    parts: list[str] = []

    # Section 1: role framing and non-negotiable chat rules.
    parts.append(
        f"你正在扮演 **{display_name}**。\n\n"
        f"{description}\n\n"
        "你现在通过 QQ 聊天与一位用户进行对话。请完全沉浸到这个角色中,"
        "用这个角色的语气、用词习惯、思维方式和表达风格来回复。\n\n"
        "重要规则:\n"
        "1. 永远不要跳出角色,不要提及你是一个 AI 模型\n"
        "2. 保持回复自然、口语化,适合 QQ 聊天场景\n"
        "3. 不要过于冗长,像真实聊天一样回复\n"
    )

    # Section 2: persona — persona.md takes priority over PART B of the body.
    if persona_file_content:
        parts.append("## 人物性格(核心)\n\n" + persona_file_content)
    else:
        part_b = _extract_part_b(body)
        if part_b:
            parts.append(part_b)

    # Section 3: work skill (PART A), included for reference when present.
    part_a = _extract_part_a(body)
    if part_a:
        parts.append("## 工作能力(参考)\n\n" + part_a)

    # Section 4: runtime rules — persona always outranks everything else.
    parts.append(
        "## 运行规则\n\n"
        "接收任何消息时:\n"
        "1. 先用角色的性格(PART B)判断:你会不会回应?用什么态度回应?\n"
        "2. 用角色的表达风格回复:说话方式、用词习惯、句式\n"
        "3. PART B 的规则永远优先,任何情况下不得违背\n"
    )

    return "\n\n---\n\n".join(parts)
130
+
131
+
132
def _scan_one_root(skills_root: Path) -> Dict[str, CharacterMeta]:
    """Scan a single skills root directory for characters.

    Expected layout: <root>/<family>/<slug>/SKILL.md, where family is one of
    colleague / relationship / celebrity. Optional per-character files:
    persona.md (replaces PART B) and meta.json (overrides front matter).
    Returns a slug -> CharacterMeta mapping; missing root yields {}.
    """
    characters: Dict[str, CharacterMeta] = {}

    if not skills_root.exists() or not skills_root.is_dir():
        return characters

    for family_dir in sorted(skills_root.iterdir()):
        if not family_dir.is_dir():
            continue
        family = family_dir.name
        # Only the three known family directories are scanned.
        if family not in ("colleague", "relationship", "celebrity"):
            continue

        for char_dir in sorted(family_dir.iterdir()):
            if not char_dir.is_dir():
                continue
            skill_md = char_dir / "SKILL.md"
            if not skill_md.exists():
                continue

            # The directory name doubles as the unique slug.
            slug = char_dir.name
            raw = skill_md.read_text(encoding="utf-8")
            meta, body = _parse_frontmatter(raw)

            display_name = meta.get("display_name", meta.get("name", slug))
            description = meta.get("description", meta.get("summary", ""))
            language = meta.get("language", "zh-CN")
            tags = meta.get("tags", [])

            # persona.md, when present, takes priority over PART B.
            persona_file = char_dir / "persona.md"
            persona_content = ""
            if persona_file.exists():
                persona_content = persona_file.read_text(encoding="utf-8")

            # meta.json values win over SKILL.md front matter.
            meta_json_path = char_dir / "meta.json"
            if meta_json_path.exists():
                meta_data = _load_json(meta_json_path)
                display_name = meta_data.get("display_name", display_name)
                description = meta_data.get("summary", description)
                language = meta_data.get("classification", {}).get("language", language)
                tags = meta_data.get("tags", tags)

            # Feed resolved values back so _build_system_prompt sees them.
            if display_name:
                meta["display_name"] = display_name
            if description:
                meta["description"] = description

            persona_prompt = persona_content or _extract_part_b(body)
            work_prompt = _extract_part_a(body)
            # NOTE(review): skills_root is passed as characters_dir, but
            # _build_system_prompt looks for <dir>/<slug>/meta.json without
            # the <family> segment — confirm whether that lookup can hit.
            combined_prompt = _build_system_prompt(meta, body, persona_content, skills_root)

            characters[slug] = CharacterMeta(
                slug=slug,
                family=family,
                display_name=display_name,
                description=description,
                language=language,
                tags=tags,
                persona_prompt=persona_prompt,
                work_prompt=work_prompt,
                combined_prompt=combined_prompt,
                source_files=[str(skill_md)],
                source_root=str(skills_root),
            )

    return characters
199
+
200
+
201
def scan_characters(skills_roots: List[Path]) -> Dict[str, CharacterMeta]:
    """Scan several skills roots and merge all discovered characters.

    Directory layout (dot-skill / colleague-skill output):
    ```
    skills/
    ├── colleague/<slug>/SKILL.md + meta.json + persona.md
    ├── relationship/<slug>/SKILL.md + ...
    └── celebrity/<slug>/SKILL.md + ...
    ```

    On slug conflicts, the root scanned earlier wins.
    """
    merged: Dict[str, CharacterMeta] = {}
    for root in skills_roots:
        for slug, character in _scan_one_root(root).items():
            # setdefault keeps the first occurrence, matching first-wins.
            merged.setdefault(slug, character)
    return merged
221
+
222
+
223
def resolve_character(
    name_or_slug: str, characters: Dict[str, CharacterMeta],
) -> Optional[CharacterMeta]:
    """Fuzzy-match a character by name or slug.

    Match priority: exact slug (case-insensitive) > exact display name >
    substring of a slug > substring of a display name. Returns None when
    nothing matches or the query is empty.
    """
    if not name_or_slug:
        return None
    stripped = name_or_slug.strip()
    needle = stripped.lower()

    exact = characters.get(needle)
    if exact is not None:
        return exact
    for candidate in characters.values():
        if candidate.display_name == stripped:
            return candidate
    for slug, candidate in characters.items():
        if needle in slug:
            return candidate
    for candidate in characters.values():
        if needle in candidate.display_name:
            return candidate
    return None
@@ -0,0 +1,161 @@
1
+ """配置模型 — 通过 NoneBot 的 Config 或 .env 文件读取。"""
2
+
3
+ from pathlib import Path
4
+ from typing import Dict, List
5
+ from nonebot import get_plugin_config
6
+ from pydantic import BaseModel, Field, field_validator
7
+
8
+
9
# ── LLM provider presets ──
# Maps a provider id to its OpenAI-compatible API base URL plus a list of
# candidate model names for that provider ("custom" falls back to OpenAI
# defaults; DotCharacterConfig overrides base_url/model in that case).
PROVIDER_PRESETS: Dict[str, dict] = {
    "openai": {
        "base_url": "https://api.openai.com/v1",
        "models": ["gpt-4o", "gpt-4o-mini", "gpt-4.1", "o4-mini"],
    },
    "deepseek": {
        "base_url": "https://api.deepseek.com",
        "models": ["deepseek-chat", "deepseek-reasoner"],
    },
    "kimi": {
        "base_url": "https://api.moonshot.cn/v1",
        "models": ["moonshot-v1-8k", "moonshot-v1-32k", "moonshot-v1-128k"],
    },
    "qwen": {
        "base_url": "https://dashscope.aliyuncs.com/compatible-mode/v1",
        "models": ["qwen-plus", "qwen-max", "qwen-turbo"],
    },
    "zhipu": {
        "base_url": "https://open.bigmodel.cn/api/paas/v4",
        "models": ["glm-4-plus", "glm-4-flash", "glm-4-air"],
    },
    "siliconflow": {
        "base_url": "https://api.siliconflow.cn/v1",
        "models": ["Qwen/Qwen2.5-72B-Instruct", "deepseek-ai/DeepSeek-V3"],
    },
    "groq": {
        "base_url": "https://api.groq.com/openai/v1",
        "models": ["llama-3.3-70b-versatile", "mixtral-8x7b-32768"],
    },
    "ollama": {
        "base_url": "http://localhost:11434/v1",
        "models": ["llama3", "qwen2.5", "mistral"],
    },
    "custom": {
        "base_url": "https://api.openai.com/v1",
        "models": ["gpt-4o-mini"],
    },
}
48
+
49
+
50
+ class DotCharacterConfig(BaseModel):
51
+ """dot-skill 角色扮演插件配置。"""
52
+
53
+ # ── LLM Provider ──
54
+ dotcharacter_provider: str = Field(
55
+ default="custom",
56
+ description="大模型 Provider 预设:openai / deepseek / kimi / qwen / zhipu / siliconflow / groq / ollama / custom",
57
+ )
58
+ dotcharacter_api_base: str = Field(
59
+ default="https://api.openai.com/v1",
60
+ description="API 地址(provider=custom 时使用)",
61
+ )
62
+ dotcharacter_api_key: str = Field(
63
+ default="",
64
+ description="API Key",
65
+ )
66
+ dotcharacter_model: str = Field(
67
+ default="gpt-4o-mini",
68
+ description="模型名称",
69
+ )
70
+ dotcharacter_temperature: float = Field(
71
+ default=0.8, ge=0.0, le=2.0,
72
+ )
73
+ dotcharacter_max_tokens: int = Field(
74
+ default=1024, ge=1, le=32768,
75
+ )
76
+ dotcharacter_timeout: int = Field(
77
+ default=60, ge=5,
78
+ )
79
+
80
+ # ── 角色目录(逗号分隔多个路径)──
81
+ dotcharacter_skills_path: str = Field(
82
+ default="",
83
+ description="dot-skill / colleague-skill 角色目录,逗号分隔",
84
+ )
85
+
86
+ # ── 对话 ──
87
+ dotcharacter_max_history: int = Field(
88
+ default=20, ge=0, le=100,
89
+ )
90
+
91
+ # ── 权限 ──
92
+ dotcharacter_admin_qq: str = Field(default="")
93
+ dotcharacter_allowed_groups: str = Field(default="")
94
+
95
+ def get_admin_qqs(self) -> List[str]:
96
+ return [q.strip() for q in str(self.dotcharacter_admin_qq).split(",") if q.strip()]
97
+
98
+ def get_allowed_groups(self) -> List[str]:
99
+ return [g.strip() for g in str(self.dotcharacter_allowed_groups).split(",") if g.strip()]
100
+
101
+ @field_validator("dotcharacter_admin_qq", "dotcharacter_allowed_groups", mode="before")
102
+ @classmethod
103
+ def _coerce_to_str(cls, v):
104
+ return str(v) if v is not None else ""
105
+
106
+ def get_api_base(self) -> str:
107
+ if self.dotcharacter_provider and self.dotcharacter_provider != "custom":
108
+ preset = PROVIDER_PRESETS.get(self.dotcharacter_provider)
109
+ if preset:
110
+ return preset["base_url"]
111
+ return self.dotcharacter_api_base
112
+
113
+ def get_available_models(self) -> List[str]:
114
+ if self.dotcharacter_provider and self.dotcharacter_provider != "custom":
115
+ preset = PROVIDER_PRESETS.get(self.dotcharacter_provider)
116
+ if preset:
117
+ return preset["models"]
118
+ return [self.dotcharacter_model]
119
+
120
+ def resolve_skills_paths(self) -> List[Path]:
121
+ """解析所有角色目录,支持逗号分隔 + 自动发现。
122
+
123
+ 检测顺序:
124
+ 1. DOTCHARACTER_SKILLS_PATH 指定的路径(逗号分隔)
125
+ 2. Claude Code / Hermes / OpenClaw 默认位置
126
+ 3. colleague-skill 仓库常见克隆位置
127
+ """
128
+ candidates: List[Path] = []
129
+
130
+ if self.dotcharacter_skills_path:
131
+ for p in self.dotcharacter_skills_path.split(","):
132
+ p = p.strip()
133
+ if p:
134
+ candidates.append(Path(p))
135
+
136
+ home = Path.home()
137
+ candidates.append(home / ".claude" / "skills" / "dot-skill" / "skills")
138
+ candidates.append(home / ".hermes" / "skills" / "dot-skill" / "skills")
139
+ candidates.append(home / ".openclaw" / "workspace" / "skills" / "dot-skill" / "skills")
140
+ candidates.append(home / "colleague-skill" / "skills")
141
+ candidates.append(home / "dot-skill" / "skills")
142
+ candidates.append(home / "Documents" / "colleague-skill" / "skills")
143
+ candidates.append(home / "code" / "colleague-skill" / "skills")
144
+ candidates.append(home / "projects" / "colleague-skill" / "skills")
145
+
146
+ seen: set = set()
147
+ result: List[Path] = []
148
+ for p in candidates:
149
+ try:
150
+ resolved = p.resolve()
151
+ except OSError:
152
+ continue
153
+ key = str(resolved)
154
+ if resolved.exists() and resolved.is_dir() and key not in seen:
155
+ seen.add(key)
156
+ result.append(resolved)
157
+ return result
158
+
159
+
160
+ def get_config() -> DotCharacterConfig:
161
+ return get_plugin_config(DotCharacterConfig)
@@ -0,0 +1,107 @@
1
+ """会话管理 — 每个用户 × 每个角色维护独立对话历史。"""
2
+
3
+ import time
4
+ from collections import OrderedDict
5
+ from dataclasses import dataclass, field
6
+ from typing import Dict, List, Optional
7
+
8
+
9
@dataclass
class ConversationSession:
    """Chat history for one (user, character) pair."""
    user_id: str
    character_slug: str
    # Entries look like {"role": "user" | "assistant", "content": "..."}.
    messages: List[dict] = field(default_factory=list)
    created_at: float = field(default_factory=time.time)
    last_active_at: float = field(default_factory=time.time)

    def _push(self, role: str, content: str) -> None:
        # Append one message and refresh the activity timestamp.
        self.messages.append({"role": role, "content": content})
        self.last_active_at = time.time()

    def add_user_message(self, content: str) -> None:
        self._push("user", content)

    def add_assistant_message(self, content: str) -> None:
        self._push("assistant", content)

    def trim(self, max_history: int) -> None:
        """Keep only the newest *max_history* messages.

        Zero (or less) wipes the history entirely. Note the limit counts
        individual messages, not user/assistant rounds.
        """
        if max_history <= 0:
            self.messages.clear()
        elif len(self.messages) > max_history:
            self.messages = self.messages[-max_history:]

    @property
    def is_empty(self) -> bool:
        return not self.messages


class ConversationManager:
    """Holds every user's sessions with a small LRU cap.

    An OrderedDict keyed by "user::slug" provides least-recently-used
    eviction so memory stays bounded in long-running bots.
    """

    MAX_SESSIONS = 500  # upper bound on concurrently retained sessions

    def __init__(self) -> None:
        self._sessions: OrderedDict[str, ConversationSession] = OrderedDict()
        # user_id -> slug of the character the user is currently chatting with
        self._active_character: Dict[str, str] = {}

    def _make_key(self, user_id: str, slug: str) -> str:
        return "::".join((user_id, slug))

    def get_session(self, user_id: str, slug: str) -> ConversationSession:
        """Fetch the session for (user, character), creating one if missing."""
        key = self._make_key(user_id, slug)
        found = self._sessions.get(key)
        if found is not None:
            self._sessions.move_to_end(key)  # mark as most recently used
            return found
        while len(self._sessions) >= self.MAX_SESSIONS:
            self._sessions.popitem(last=False)  # evict the stalest session
        created = ConversationSession(user_id=user_id, character_slug=slug)
        self._sessions[key] = created
        return created

    def reset_session(self, user_id: str, slug: str) -> None:
        """Forget a user's history with one character."""
        self._sessions.pop(self._make_key(user_id, slug), None)

    def set_active_character(self, user_id: str, slug: str) -> None:
        self._active_character[user_id] = slug

    def get_active_character(self, user_id: str) -> Optional[str]:
        return self._active_character.get(user_id)

    def clear_active_character(self, user_id: str) -> None:
        self._active_character.pop(user_id, None)

    def cleanup_stale(self, max_age_seconds: float = 3600.0) -> int:
        """Drop sessions idle longer than *max_age_seconds*; return count."""
        cutoff = time.time() - max_age_seconds
        doomed = [
            key
            for key, session in self._sessions.items()
            if session.last_active_at < cutoff
        ]
        for key in doomed:
            self._sessions.pop(key, None)
        return len(doomed)
97
+
98
+
99
# Module-level singleton, created lazily on first access.
_conversation_manager: Optional[ConversationManager] = None


def get_conversation_manager() -> ConversationManager:
    """Return the process-wide ConversationManager, creating it on first call."""
    global _conversation_manager
    if _conversation_manager is None:
        _conversation_manager = ConversationManager()
    return _conversation_manager
@@ -0,0 +1,91 @@
1
+ """LLM 客户端 — 调用 OpenAI 兼容 API 进行角色对话。
2
+
3
+ 支持所有 OpenAI Chat Completions 兼容的 API:
4
+ OpenAI / DeepSeek / Kimi / Qwen / Zhipu / SiliconFlow / Groq / Ollama / 自定义
5
+ """
6
+
7
+ from typing import List, Optional
8
+
9
+ import httpx
10
+
11
+ from .config import DotCharacterConfig
12
+
13
+
14
+ def _msg(role: str, content: str) -> dict:
15
+ return {"role": role, "content": content}
16
+
17
+
18
+ def system_msg(content: str) -> dict:
19
+ return _msg("system", content)
20
+
21
+
22
+ def user_msg(name: str, content: str) -> dict:
23
+ return _msg("user", f"{name}: {content}")
24
+
25
+
26
+ def assistant_msg(content: str) -> dict:
27
+ return _msg("assistant", content)
28
+
29
+
30
+ async def chat_completion(
31
+ config: DotCharacterConfig,
32
+ messages: List[dict],
33
+ max_tokens: Optional[int] = None,
34
+ ) -> str:
35
+ """发送聊天请求到 LLM,返回助手回复文本。
36
+
37
+ 自动根据 Provider 预设或自定义 api_base 构造请求 URL。
38
+ 兼容所有 OpenAI Chat Completions 格式的 API。
39
+ """
40
+ api_key = config.dotcharacter_api_key
41
+ if not api_key or api_key.startswith("sk-your-"):
42
+ raise ValueError(
43
+ "未配置有效的 DOTCHARACTER_API_KEY。"
44
+ "请在 .env 中设置真实的 API Key。"
45
+ )
46
+
47
+ base_url = config.get_api_base().rstrip("/")
48
+ url = f"{base_url}/chat/completions"
49
+ headers = {
50
+ "Authorization": f"Bearer {api_key}",
51
+ "Content-Type": "application/json",
52
+ }
53
+ payload = {
54
+ "model": config.dotcharacter_model,
55
+ "messages": messages,
56
+ "temperature": config.dotcharacter_temperature,
57
+ "max_tokens": max_tokens or config.dotcharacter_max_tokens,
58
+ }
59
+
60
+ async with httpx.AsyncClient(timeout=config.dotcharacter_timeout) as client:
61
+ response = await client.post(url, json=payload, headers=headers)
62
+
63
+ if response.status_code != 200:
64
+ err_detail = response.text[:500]
65
+ raise RuntimeError(
66
+ f"LLM API 返回错误 (HTTP {response.status_code}): {err_detail}"
67
+ )
68
+
69
+ data = response.json()
70
+ choices = data.get("choices", [])
71
+ if not choices:
72
+ raise RuntimeError(f"LLM API 返回空 choices: {data}")
73
+
74
+ content = choices[0].get("message", {}).get("content", "")
75
+ return content.strip()
76
+
77
+
78
+ async def test_api_connection(config: DotCharacterConfig) -> bool:
79
+ """快速测试 API 连接是否正常。"""
80
+ try:
81
+ result = await chat_completion(
82
+ config,
83
+ messages=[
84
+ system_msg("回复 OK,只输出这两个字母。"),
85
+ user_msg("test", "ping"),
86
+ ],
87
+ max_tokens=10,
88
+ )
89
+ return "OK" in result.upper()
90
+ except Exception:
91
+ return False