nonebot-plugin-skills 0.1.2__tar.gz → 0.2.0__tar.gz
- {nonebot_plugin_skills-0.1.2 → nonebot_plugin_skills-0.2.0}/PKG-INFO +8 -1
- {nonebot_plugin_skills-0.1.2 → nonebot_plugin_skills-0.2.0}/README.md +7 -0
- {nonebot_plugin_skills-0.1.2 → nonebot_plugin_skills-0.2.0}/nonebot_plugin_skills/__init__.py +511 -108
- {nonebot_plugin_skills-0.1.2 → nonebot_plugin_skills-0.2.0}/nonebot_plugin_skills/config.py +6 -0
- {nonebot_plugin_skills-0.1.2 → nonebot_plugin_skills-0.2.0}/pyproject.toml +1 -1
- {nonebot_plugin_skills-0.1.2 → nonebot_plugin_skills-0.2.0}/LICENSE +0 -0
{nonebot_plugin_skills-0.1.2 → nonebot_plugin_skills-0.2.0}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: nonebot-plugin-skills
-Version: 0.1.2
+Version: 0.2.0
 Summary: Gemini avatar/image and chat plugin with skills and context cache for NoneBot2
 License-File: LICENSE
 Author: yourname
@@ -29,6 +29,7 @@ Description-Content-Type: text/markdown
 - Chat: natural-language conversation with context
 - Weather lookup: enter a city/region to query the current weather
 - Context cache: isolated per group/private chat, expires on a schedule
+- History compression: past a threshold, older messages are automatically summarized so key information is kept
 
 ## Installation
 Install the dependency in your NoneBot2 project:
@@ -55,6 +56,12 @@ GEMINI_TEXT_MODEL=gemini-2.5-flash
 GEMINI_IMAGE_MODEL=gemini-2.5-flash-image
 HISTORY_TTL_SEC=600
 HISTORY_MAX_MESSAGES=20
+HISTORY_COMPRESS_ENABLE=true
+HISTORY_COMPRESS_TRIGGER=20
+HISTORY_COMPRESS_KEEP=6
+HISTORY_COMPRESS_MIN_MESSAGES=6
+HISTORY_COMPRESS_MAX_CHARS=600
+HISTORY_REFERENCE_ONLY=true
 GEMINI_LOG_RESPONSE=false
 IMAGE_TIMEOUT=120
 NLP_ENABLE=true
{nonebot_plugin_skills-0.1.2 → nonebot_plugin_skills-0.2.0}/README.md
@@ -7,6 +7,7 @@
 - Chat: natural-language conversation with context
 - Weather lookup: enter a city/region to query the current weather
 - Context cache: isolated per group/private chat, expires on a schedule
+- History compression: past a threshold, older messages are automatically summarized so key information is kept
 
 ## Installation
 Install the dependency in your NoneBot2 project:
@@ -33,6 +34,12 @@ GEMINI_TEXT_MODEL=gemini-2.5-flash
 GEMINI_IMAGE_MODEL=gemini-2.5-flash-image
 HISTORY_TTL_SEC=600
 HISTORY_MAX_MESSAGES=20
+HISTORY_COMPRESS_ENABLE=true
+HISTORY_COMPRESS_TRIGGER=20
+HISTORY_COMPRESS_KEEP=6
+HISTORY_COMPRESS_MIN_MESSAGES=6
+HISTORY_COMPRESS_MAX_CHARS=600
+HISTORY_REFERENCE_ONLY=true
 GEMINI_LOG_RESPONSE=false
 IMAGE_TIMEOUT=120
 NLP_ENABLE=true
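The new compression settings interact: when `HISTORY_COMPRESS_TRIGGER` is unset or non-positive, the plugin falls back to twice `HISTORY_MAX_MESSAGES` with a floor of 16, as the `_history_compress_trigger` helper in the `__init__.py` diff below shows. A minimal standalone sketch of that fallback (the function name here is illustrative; the field names match the plugin's config):

```python
# Sketch only: mirrors the fallback in _history_compress_trigger() below.
def effective_trigger(history_compress_trigger: int, history_max_messages: int) -> int:
    # An explicit positive trigger wins; otherwise compress at twice the
    # normal history window, but never below 16 messages.
    if history_compress_trigger > 0:
        return history_compress_trigger
    return max(16, history_max_messages * 2)

assert effective_trigger(20, 20) == 20   # the .env example above
assert effective_trigger(0, 20) == 40    # unset -> 2x the window
assert effective_trigger(0, 5) == 16     # floor of 16
```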
{nonebot_plugin_skills-0.1.2 → nonebot_plugin_skills-0.2.0}/nonebot_plugin_skills/__init__.py
RENAMED
@@ -62,6 +62,7 @@ _MD_HRULE = re.compile(r"^\s*([-*_]\s*){3,}$", re.M)
 _MD_LIST_MARKER = re.compile(r"^\s*(?:[-*+•·*]|\d+[).、])\s*")
 _LINE_SPLIT_RE = re.compile(r"\r?\n+")
 _SENTENCE_SPLIT_RE = re.compile(r"([。!?!?;;])")
+_CLAUSE_SPLIT_RE = re.compile(r"([,,、;;])")
 
 _MODEL_REPLY_MAX_CHARS = 0
 _WAIT_NEXT_IMAGE_SEC = 60.0
@@ -75,7 +76,7 @@ _CHAT_SYSTEM_PROMPT = (
     "Rules\n"
     "1. 只当普通人,不要自称 AI/助手,也不要提系统或规则。\n"
     "2. 输出纯文本,不使用 Markdown 或代码块。\n"
-    "3. 适合 QQ
+    "3. 适合 QQ 消息\n"
     "4. 不要夸张表演,不堆流行语,不写长段解释。\n"
     "5. 不清楚就简短追问,不编造细节。\n"
     "6. 只回复当前消息,不要夹带已回复过的旧话题。\n"
@@ -87,7 +88,7 @@ _IMAGE_CHAT_SYSTEM_PROMPT = (
     "你现在是asoul成员嘉然,会尽量满足提问者的帮助。\n"
     "你在进行图片内容对话,只需回答当前指令或问题。\n"
     "不要补充已回复过的历史话题,不要输出 Markdown 或代码块。\n"
-    "回答适合 QQ
+    "回答适合 QQ 消息,精炼、不啰嗦,简短、口语化,可自然换行;尽量用 2-3 句完整回答。\n"
 )
 
 _TRAVEL_SYSTEM_PROMPT = (
@@ -101,7 +102,7 @@ _TRAVEL_SYSTEM_PROMPT = (
 _INTENT_SYSTEM_PROMPT = (
     "你是消息意图解析器,只输出 JSON,不要解释或补充说明。"
     "不要输出拒绝/免责声明/权限说明(例如“我无法访问账号”)。"
-    "
+    "只输出单一 JSON 对象,格式如下:"
     "{"
     "\"action\": \"chat|image_chat|image_generate|image_create|weather|avatar_get|travel_plan|history_clear|ignore\","
     "\"target\": \"message_image|reply_image|at_user|last_image|sender_avatar|group_avatar|qq_avatar|message_id|wait_next|city|trip|none\","
@@ -109,34 +110,33 @@ _INTENT_SYSTEM_PROMPT = (
     "\"params\": {\"qq\": \"string\", \"message_id\": \"int\", \"city\": \"string\","
     " \"destination\": \"string\", \"days\": \"int\", \"nights\": \"int\", \"reply\": \"string\"}"
     "}"
-    "
-    "- action=
-    "- action=
-    "- action=
-    "- action=
-    "- action=
-    "- action=
-    "- action=
-    "params.destination
-    "- action=history_clear
-    "- action=ignore
-    "- target
-    "
-    "
-    "
-    "
-    "
-    "params 里只在对应 target 时填写:"
-    "- target=qq_avatar 时填写 params.qq。"
-    "- target=message_id 时填写 params.message_id。"
-    "其他情况 params 为空对象。"
-    "若旅行或天气缺关键信息,仍输出对应 action,缺失字段留空。"
-    "上下文可能包含“昵称: 内容”的格式,需识别说话人。"
-    "如需发送等待/过渡语,可在 params.reply 中填写一句短句。"
-    " 如果文本包含多行,默认第一行是当前消息;只有当前消息无法判断时才参考后续上下文/回复内容。"
+    "规则:"
+    "- 除 action=ignore 外,instruction 必须为非空字符串,填写当前用户原话或提炼后的指令。"
+    "- action=chat:普通聊天;target=none。"
+    "- action=image_chat:聊这张图(不生成图);instruction 为问题/描述;target 用于选图:message_image/reply_image/at_user/last_image/sender_avatar/group_avatar/qq_avatar/message_id/wait_next。"
+    "- action=image_generate:基于参考图生成/编辑;instruction 为处理指令。target 用于选图:message_image/reply_image/at_user/last_image/sender_avatar/group_avatar/qq_avatar/message_id/wait_next。"
+    "- action=image_create:无参考图生成;target=none。"
+    "- action=weather:查询天气;target=city;params.city 为地点(没有就留空)。"
+    "- action=avatar_get:获取头像;target 可为 sender_avatar/group_avatar/qq_avatar/at_user。"
+    "- action=travel_plan:旅行规划;target=trip;params.destination/days/nights 可填则填。"
+    "- action=history_clear:清除当前会话历史;target=none。"
+    "- action=ignore:不处理;instruction 为空字符串,target=none,params 为空对象。"
+    "- target=qq_avatar 时填写 params.qq;target=message_id 时填写 params.message_id。"
+    "- params 仅在对应 target/场景需要时填写,其余为空对象。"
+    "- 若旅行或天气缺关键信息,仍输出对应 action,缺失字段留空,instruction 仍填当前消息。"
+    "- 当需要调用第三方工具且可能耗时(如 weather、image_create、image_generate、image_chat、avatar_get、travel_plan)时,可在 params.reply 中给等待/过渡语。"
+    "- 若消息里 @ 多人,仍输出 target=at_user,系统会按顺序处理多个头像。"
+    "- 上下文可能包含“昵称: 内容”的格式,需识别说话人。"
 )
 
 _DUPLICATE_TEXT_TTL_SEC = 60.0
+_HISTORY_SUMMARY_ITEM_MAX_CHARS = 400
+
+_HISTORY_SUMMARY_SYSTEM_PROMPT = (
+    "你是对话摘要器,请将对话压缩成简短摘要。"
+    "保留关键信息、用户偏好、需求、结论与待办。"
+    "输出纯文本,不使用 Markdown、编号或引号。"
+)
 
 
 class UnsupportedImageError(RuntimeError):
@@ -294,6 +294,29 @@ def _split_sentences(text: str) -> List[str]:
     return [item for item in sentences if item]
 
 
+def _split_clauses(text: str) -> List[str]:
+    if not text:
+        return []
+    parts = _CLAUSE_SPLIT_RE.split(text)
+    clauses: List[str] = []
+    buffer = ""
+    for part in parts:
+        if not part:
+            continue
+        if _CLAUSE_SPLIT_RE.fullmatch(part):
+            buffer = f"{buffer}{part}"
+            if buffer.strip():
+                clauses.append(buffer.strip())
+            buffer = ""
+        else:
+            if buffer:
+                clauses.append(buffer.strip())
+            buffer = part
+    if buffer and buffer.strip():
+        clauses.append(buffer.strip())
+    return clauses
+
+
 def _forward_line_threshold() -> int:
     try:
         threshold = int(getattr(config, "forward_line_threshold", 0))
@@ -327,8 +350,11 @@ async def _send_text_response(
     if len(lines) <= _forward_line_threshold():
         sentences = _split_sentences(str(text))
         if len(sentences) <= 1:
-            await send_func(text)
-            return
+            clauses = _split_clauses(str(text))
+            if len(clauses) <= 1:
+                await send_func(text)
+                return
+            sentences = clauses
         delay = _message_send_delay_sec()
         for idx, sentence in enumerate(sentences):
             await send_func(sentence)
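For reference, this is how the new clause splitter behaves on a single-sentence reply. The snippet below is a self-contained copy of the `_CLAUSE_SPLIT_RE` pattern and `_split_clauses` helper from the hunks above, plus a small demo (the sample text is made up):

```python
import re
from typing import List

_CLAUSE_SPLIT_RE = re.compile(r"([,,、;;])")

def _split_clauses(text: str) -> List[str]:
    # Same logic as the added helper: each clause keeps the
    # delimiter that terminates it.
    if not text:
        return []
    parts = _CLAUSE_SPLIT_RE.split(text)
    clauses: List[str] = []
    buffer = ""
    for part in parts:
        if not part:
            continue
        if _CLAUSE_SPLIT_RE.fullmatch(part):
            buffer = f"{buffer}{part}"
            if buffer.strip():
                clauses.append(buffer.strip())
            buffer = ""
        else:
            if buffer:
                clauses.append(buffer.strip())
            buffer = part
    if buffer and buffer.strip():
        clauses.append(buffer.strip())
    return clauses

print(_split_clauses("好呀,明天见、记得带伞"))
# -> ['好呀,', '明天见、', '记得带伞']
```

So a reply that `_split_sentences` sees as one sentence can still be sent as several short QQ messages, one clause at a time.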
@@ -362,6 +388,7 @@ async def _send_text_response(
 
 
 def _transition_text(action: str) -> Optional[str]:
+    # Default transition notice: tell the user that a slow operation is in progress
     if action in {"image_create"}:
         return "正在生成图片,请稍候..."
     if action in {"image_generate"}:
@@ -372,6 +399,7 @@ def _transition_text(action: str) -> Optional[str]:
 
 
 def _intent_transition_text(intent: dict) -> str:
+    # Optional transition phrase from the NLP step (params.reply); use it if present, else ""
     params = _intent_params(intent)
     reply = params.get("reply")
     if isinstance(reply, str):
@@ -379,6 +407,14 @@ def _intent_transition_text(intent: dict) -> str:
     return ""
 
 
+def _resolve_transition_text(action: str, intent: dict) -> Optional[str]:
+    # Prefer the transition phrase supplied by the intent; fall back to the default
+    reply = _intent_transition_text(intent)
+    if reply:
+        return reply
+    return _transition_text(action)
+
+
 async def _send_transition(action: str, send_func) -> None:
     text = _transition_text(action)
     if text:
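`_resolve_transition_text` gives a model-supplied phrase priority over the hard-coded defaults. A standalone sketch of that precedence (the intent dicts and the `DEFAULTS` table are fabricated examples, not the plugin's full default set):

```python
from typing import Optional

# Sketch of the precedence implemented by _resolve_transition_text above.
DEFAULTS = {"image_create": "正在生成图片,请稍候..."}

def resolve_transition(action: str, intent: dict) -> Optional[str]:
    reply = intent.get("params", {}).get("reply")
    if isinstance(reply, str) and reply.strip():
        return reply.strip()        # phrase suggested by the intent classifier wins
    return DEFAULTS.get(action)     # otherwise fall back to the built-in default

assert resolve_transition("image_create", {"params": {"reply": "画画中~"}}) == "画画中~"
assert resolve_transition("image_create", {"params": {}}) == "正在生成图片,请稍候..."
```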
@@ -470,6 +506,7 @@ class HistoryItem:
     user_name: Optional[str] = None
     to_bot: bool = False
     message_id: Optional[int] = None
+    is_summary: bool = False
 
 
 @dataclass
@@ -480,6 +517,9 @@ class SessionState:
     pending_image_waiters: dict[str, asyncio.Future[str]]
     handled_message_ids: dict[int, float]
     handled_texts: dict[str, float]
+    history_lock: asyncio.Lock
+    summary_last_ts: float
+    summary_in_progress: bool
 
 
 _SESSIONS: dict[str, SessionState] = {}
@@ -513,6 +553,9 @@ def _get_state(session_id: str) -> SessionState:
         pending_image_waiters={},
         handled_message_ids={},
         handled_texts={},
+        history_lock=asyncio.Lock(),
+        summary_last_ts=0.0,
+        summary_in_progress=False,
     )
     _SESSIONS[session_id] = state
     return state
@@ -527,12 +570,234 @@ def _get_client() -> genai.Client:
     return _CLIENT
 
 
-def
+def _history_compress_enabled() -> bool:
+    try:
+        return bool(getattr(config, "history_compress_enable", True))
+    except Exception:
+        return True
+
+
+def _history_reference_only() -> bool:
+    try:
+        return bool(getattr(config, "history_reference_only", True))
+    except Exception:
+        return True
+
+
+def _history_compress_trigger() -> int:
+    try:
+        value = int(getattr(config, "history_compress_trigger", 0))
+    except Exception:
+        value = 0
+    if value <= 0:
+        try:
+            base = int(getattr(config, "history_max_messages", 10))
+        except Exception:
+            base = 10
+        return max(16, base * 2)
+    return value
+
+
+def _history_compress_keep() -> int:
+    try:
+        value = int(getattr(config, "history_compress_keep", 0))
+    except Exception:
+        value = 0
+    if value <= 0:
+        return 6
+    return value
+
+
+def _history_compress_min_messages() -> int:
+    try:
+        value = int(getattr(config, "history_compress_min_messages", 0))
+    except Exception:
+        value = 0
+    if value <= 0:
+        return 6
+    return value
+
+
+def _history_compress_max_chars() -> int:
+    try:
+        value = int(getattr(config, "history_compress_max_chars", 0))
+    except Exception:
+        value = 0
+    if value <= 0:
+        return 600
+    return value
+
+
+def _history_hard_limit() -> int:
+    try:
+        base = int(getattr(config, "history_max_messages", 10))
+    except Exception:
+        base = 10
+    trigger = _history_compress_trigger()
+    return max(50, base * 5, trigger * 2)
+
+
+def _count_non_summary_items(items: List[HistoryItem]) -> int:
+    return sum(1 for item in items if not item.is_summary)
+
+
+def _history_item_label(item: HistoryItem) -> str:
+    if item.role == "model":
+        return item.user_name or _model_user_name()
+    if item.is_summary:
+        return item.user_name or "系统摘要"
+    return item.user_name or item.user_id or "用户"
+
+
+def _history_item_to_line(item: HistoryItem) -> str:
+    text = _ensure_plain_text(str(item.text))
+    if not text:
+        return ""
+    text = _truncate(text, _HISTORY_SUMMARY_ITEM_MAX_CHARS)
+    name = _history_item_label(item)
+    if name:
+        return f"{name}: {text}"
+    return text
+
+
+def _build_history_summary_input(items: List[HistoryItem]) -> str:
+    lines: List[str] = []
+    for item in items:
+        line = _history_item_to_line(item)
+        if line:
+            lines.append(line)
+    return "\n".join(lines).strip()
+
+
+def _build_history_reference_text(state: SessionState) -> str:
+    lines: List[str] = []
+    for item in state.history:
+        if item.is_summary:
+            line = _history_item_to_line(item)
+            if line:
+                lines.append(line)
+            continue
+        if item.role == "user" and not item.to_bot:
+            continue
+        line = _history_item_to_line(item)
+        if line:
+            lines.append(line)
+    return "\n".join(lines).strip()
+
+
+def _wrap_prompt_with_reference(
+    state: SessionState,
+    prompt: str,
+    *,
+    current_label: str,
+) -> str:
+    if not _history_reference_only():
+        return prompt
+    reference_text = _build_history_reference_text(state)
+    if not reference_text:
+        return prompt
+    return f"参考对话(仅供参考,不需要回复):\n{reference_text}\n\n{current_label}:\n{prompt}"
+
+
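For a concrete picture of what `_wrap_prompt_with_reference` hands the model, here is the wrapped-prompt shape with fabricated history lines (illustration only; the name labels come from `_build_history_reference_text`):

```python
# Illustration of the single-prompt format produced above; all values are made up.
reference_text = "小明: 帮我画只猫\n嘉然: [已生成图片]"
current_label = "当前消息"
prompt = "再画一只狗"
wrapped = (
    f"参考对话(仅供参考,不需要回复):\n{reference_text}"
    f"\n\n{current_label}:\n{prompt}"
)
print(wrapped)
# 参考对话(仅供参考,不需要回复):
# 小明: 帮我画只猫
# 嘉然: [已生成图片]
#
# 当前消息:
# 再画一只狗
```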
+async def _summarize_history_items(items: List[HistoryItem]) -> Optional[str]:
+    if not items:
+        return None
+    input_text = _build_history_summary_input(items)
+    if not input_text:
+        return None
+    max_chars = _history_compress_max_chars()
+    user_prompt = (
+        f"请总结以下对话记录,输出一段简短摘要,控制在{max_chars}字以内。\n"
+        f"对话记录:\n{input_text}"
+    )
+    client = _get_client()
+    config_obj, system_used = _build_generate_config(
+        system_instruction=_HISTORY_SUMMARY_SYSTEM_PROMPT
+    )
+    if _HISTORY_SUMMARY_SYSTEM_PROMPT and not system_used:
+        user_prompt = f"{_HISTORY_SUMMARY_SYSTEM_PROMPT}\n\n{user_prompt}"
+    response = await asyncio.wait_for(
+        client.aio.models.generate_content(
+            model=config.gemini_text_model,
+            contents=[types.Content(role="user", parts=[types.Part.from_text(text=user_prompt)])],
+            config=config_obj,
+        ),
+        timeout=config.request_timeout,
+    )
+    if config.gemini_log_response:
+        logger.info("Gemini history summary response: {}", _dump_response(response))
+    _log_response_text("Gemini history summary content", response)
+    if response.text:
+        cleaned = _format_reply_text(response.text.strip())
+        cleaned = _compact_reply_lines(cleaned)
+        cleaned = _limit_reply_text(cleaned, max_chars)
+        return cleaned
+    text_parts: List[str] = []
+    for part in _iter_response_parts(response):
+        text_value = _extract_text_value(part)
+        if text_value:
+            text_parts.append(text_value)
+    cleaned = _format_reply_text("\n".join(text_parts).strip())
+    cleaned = _compact_reply_lines(cleaned)
+    cleaned = _limit_reply_text(cleaned, max_chars)
+    return cleaned
+
+
+async def _maybe_compress_history(state: SessionState) -> bool:
+    # History compression: once past the trigger, summarize the older records
+    # into a single "system summary" item and keep only the most recent few
+    if not _history_compress_enabled():
+        return False
+    if not config.google_api_key:
+        return False
+    if state.summary_in_progress:
+        return False
+    trigger = _history_compress_trigger()
+    if len(state.history) < trigger:
+        return False
+    keep = max(0, _history_compress_keep())
+    if keep >= len(state.history):
+        return False
+    compress_items = state.history[:-keep] if keep > 0 else list(state.history)
+    if _count_non_summary_items(compress_items) < _history_compress_min_messages():
+        return False
+    if not any(item.to_bot or item.role == "model" for item in compress_items):
+        return False
+    state.summary_in_progress = True
+    try:
+        summary = await _summarize_history_items(compress_items)
+    except Exception as exc:
+        logger.error("History summary failed: {}", _safe_error_message(exc))
+        return False
+    finally:
+        state.summary_in_progress = False
+    if not summary:
+        return False
+    ts = compress_items[-1].ts if compress_items else _now()
+    summary_item = HistoryItem(
+        role="user",
+        text=summary,
+        ts=ts,
+        user_name="系统摘要",
+        to_bot=True,
+        is_summary=True,
+    )
+    keep_items = state.history[-keep:] if keep > 0 else []
+    state.history = [summary_item, *keep_items]
+    state.summary_last_ts = _now()
+    return True
+
+
+def _prune_state(state: SessionState, *, trim_history: bool = True) -> None:
     ttl = max(30, int(config.history_ttl_sec))
     cutoff = _now() - ttl
     state.history = [item for item in state.history if item.ts >= cutoff]
-
-
+    hard_limit = _history_hard_limit()
+    if hard_limit > 0 and len(state.history) > hard_limit:
+        state.history = state.history[-hard_limit:]
+    if trim_history:
+        max_messages = max(1, int(config.history_max_messages))
+        if len(state.history) > max_messages:
+            state.history = state.history[-max_messages:]
     if state.image_cache:
         state.image_cache = {
             msg_id: (url, ts)
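To see the compaction end to end: with a trigger of 8 and keep of 3, a 10-item history collapses to one summary item plus the last 3. A self-contained sketch of the same threshold semantics as `_maybe_compress_history`, with a stub summarizer instead of the Gemini call (it also omits the async plumbing and the requirement that at least one compressed item was addressed to the bot):

```python
from dataclasses import dataclass
from typing import Callable, List

@dataclass
class Item:
    role: str
    text: str
    is_summary: bool = False

def compress(history: List[Item], trigger: int, keep: int, min_messages: int,
             summarize: Callable[[List[Item]], str]) -> List[Item]:
    # Same decision order as _maybe_compress_history above.
    if len(history) < trigger:
        return history
    keep = max(0, keep)
    if keep >= len(history):
        return history
    old = history[:-keep] if keep > 0 else list(history)
    if sum(1 for i in old if not i.is_summary) < min_messages:
        return history
    summary = summarize(old)
    if not summary:
        return history
    head = Item(role="user", text=summary, is_summary=True)
    return [head, *(history[-keep:] if keep > 0 else [])]

history = [Item("user", f"msg {n}") for n in range(10)]
out = compress(history, trigger=8, keep=3, min_messages=4,
               summarize=lambda items: f"摘要({len(items)}条)")
print([i.text for i in out])   # ['摘要(7条)', 'msg 7', 'msg 8', 'msg 9']
```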
@@ -554,6 +819,8 @@ def _clear_session_state(state: SessionState) -> None:
     state.history = []
     state.last_image_url = None
     state.image_cache = {}
+    state.summary_last_ts = 0.0
+    state.summary_in_progress = False
     if state.pending_image_waiters:
         for waiter in state.pending_image_waiters.values():
             if not waiter.done():
@@ -619,13 +886,24 @@ def _extract_first_image_url(message: Message) -> Optional[str]:
     return None
 
 
-def
+def _extract_at_users(message: Message, self_id: Optional[object]) -> List[str]:
+    users: List[str] = []
+    seen: set[str] = set()
+    self_str = str(self_id) if self_id is not None else None
     for seg in message:
-        if seg.type
-
-
-
-
+        if seg.type != "at":
+            continue
+        qq = seg.data.get("qq")
+        if not qq or qq == "all":
+            continue
+        qq_str = str(qq)
+        if self_str and qq_str == self_str:
+            continue
+        if qq_str in seen:
+            continue
+        seen.add(qq_str)
+        users.append(qq_str)
+    return users
 
 
 def _avatar_url(qq: str) -> str:
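A quick check of the new @-extraction against OneBot v11 segments. `Message` and `MessageSegment` come from `nonebot.adapters.onebot.v11`, which the plugin already targets; this demo assumes that adapter is installed, and the QQ numbers are invented:

```python
# Demonstration only; requires nonebot-adapter-onebot (OneBot v11).
from nonebot.adapters.onebot.v11 import Message, MessageSegment

msg = Message([
    MessageSegment.at(10001),
    MessageSegment.text(" 和 "),
    MessageSegment.at(10002),
    MessageSegment.at(10001),   # duplicate -> dropped via `seen`
    MessageSegment.at("all"),   # @全体成员 -> dropped
])
# With the helper above and the bot's own id excluded:
#   _extract_at_users(msg, self_id=99999) -> ["10001", "10002"]
print([seg.data.get("qq") for seg in msg if seg.type == "at"])
```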
@@ -966,7 +1244,20 @@ def _extract_text_value(part: object) -> Optional[str]:
 
 async def _call_gemini_text(prompt: str, state: SessionState) -> str:
     client = _get_client()
-
+    # Reply only to the current message: history is folded into the prompt as reference text
+    if _history_reference_only():
+        prompt = _wrap_prompt_with_reference(state, prompt, current_label="当前消息")
+        contents = [
+            types.Content(
+                role="user",
+                parts=[types.Part.from_text(text=prompt)],
+            )
+        ]
+    else:
+        contents = _history_to_gemini(state)
+        contents.append(
+            types.Content(role="user", parts=[types.Part.from_text(text=prompt)])
+        )
     config_obj, system_used = _build_generate_config(system_instruction=_CHAT_SYSTEM_PROMPT)
     if _CHAT_SYSTEM_PROMPT and not system_used:
         contents.insert(
@@ -976,7 +1267,6 @@ async def _call_gemini_text(prompt: str, state: SessionState) -> str:
             parts=[types.Part.from_text(text=_CHAT_SYSTEM_PROMPT)],
         ),
     )
-    contents.append(types.Content(role="user", parts=[types.Part.from_text(text=prompt)]))
     response = await asyncio.wait_for(
         client.aio.models.generate_content(
             model=config.gemini_text_model,
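Both branches ultimately hand the SDK a list of `types.Content`; the only difference is whether history arrives as prior turns or as inline reference text. A condensed sketch of that choice using the same `google-genai` calls as the diff (the function itself is illustrative, not part of the plugin):

```python
from google.genai import types

def build_contents(prompt: str, history_contents: list, reference_only: bool) -> list:
    # Reference-only mode: a single user turn; history was already folded
    # into `prompt` by _wrap_prompt_with_reference.
    if reference_only:
        return [types.Content(role="user",
                              parts=[types.Part.from_text(text=prompt)])]
    # Multi-turn mode: prior history turns plus the new user turn.
    contents = list(history_contents)
    contents.append(types.Content(role="user",
                                  parts=[types.Part.from_text(text=prompt)]))
    return contents
```

The same pattern repeats in the travel-plan, image, image-chat, and text-to-image call sites below.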
@@ -1024,9 +1314,21 @@ def _build_travel_prompt(intent: dict) -> str:
 
 async def _call_gemini_travel_plan(intent: dict, state: SessionState) -> str:
     client = _get_client()
-    contents = _history_to_gemini(state)
     prompt = _build_travel_prompt(intent)
-
+    # Reply only to the current request: history goes in as reference text
+    if _history_reference_only():
+        prompt = _wrap_prompt_with_reference(state, prompt, current_label="当前需求")
+        contents = [
+            types.Content(
+                role="user",
+                parts=[types.Part.from_text(text=prompt)],
+            )
+        ]
+    else:
+        contents = _history_to_gemini(state)
+        contents.append(
+            types.Content(role="user", parts=[types.Part.from_text(text=prompt)])
+        )
     config_obj, _ = _build_generate_config()
     response = await asyncio.wait_for(
         client.aio.models.generate_content(
@@ -1066,7 +1368,12 @@ async def _download_image_bytes(url: str) -> Tuple[str, bytes]:
 async def _call_gemini_image(prompt: str, image_url: str, state: SessionState) -> Tuple[bool, str]:
     client = _get_client()
     content_type, image_bytes = await _download_image_bytes(image_url)
-
+    # Reference history + current instruction + reference image, for image editing
+    prompt = _wrap_prompt_with_reference(state, prompt, current_label="当前指令")
+    if _history_reference_only():
+        contents = []
+    else:
+        contents = _history_to_gemini(state)
     contents.append(
         types.Content(
             role="user",
@@ -1116,7 +1423,12 @@ async def _call_gemini_image(prompt: str, image_url: str, state: SessionState) -
 async def _call_gemini_image_chat(prompt: str, image_url: str, state: SessionState) -> str:
     client = _get_client()
     content_type, image_bytes = await _download_image_bytes(image_url)
-
+    # Reference history + current instruction + reference image; text-only answer (image chat)
+    prompt = _wrap_prompt_with_reference(state, prompt, current_label="当前指令")
+    if _history_reference_only():
+        contents = []
+    else:
+        contents = _history_to_gemini(state)
     config_obj, system_used = _build_generate_config(
         system_instruction=_IMAGE_CHAT_SYSTEM_PROMPT,
         response_modalities=["TEXT"],
@@ -1165,8 +1477,20 @@ async def _call_gemini_image_chat(prompt: str, image_url: str, state: SessionSta
 
 async def _call_gemini_text_to_image(prompt: str, state: SessionState) -> Tuple[bool, str]:
     client = _get_client()
-
-
+    # Reply only to the current instruction; history may ride along as reference text
+    prompt = _wrap_prompt_with_reference(state, prompt, current_label="当前指令")
+    if _history_reference_only():
+        contents = [
+            types.Content(
+                role="user",
+                parts=[types.Part.from_text(text=prompt)],
+            )
+        ]
+    else:
+        contents = _history_to_gemini(state)
+        contents.append(
+            types.Content(role="user", parts=[types.Part.from_text(text=prompt)])
+        )
     config_obj, _ = _build_generate_config(response_modalities=["IMAGE"])
     response = await asyncio.wait_for(
         client.aio.models.generate_content(
@@ -1214,7 +1538,7 @@ def _image_segment_from_result(result: str) -> MessageSegment:
     return MessageSegment.image(f"base64://{result}")
 
 
-def _append_history(
+async def _append_history(
     state: SessionState,
     role: str,
     text: str,
@@ -1225,20 +1549,23 @@ def _append_history(
     ts: Optional[float] = None,
     message_id: Optional[int] = None,
 ) -> None:
-
-
-
-
-
-
-
-
-
-
-
+    async with state.history_lock:
+        if role == "model" and not user_name:
+            user_name = _model_user_name()
+        state.history.append(
+            HistoryItem(
+                role=role,
+                text=text,
+                ts=_now() if ts is None else ts,
+                user_id=user_id,
+                user_name=user_name,
+                to_bot=to_bot,
+                message_id=message_id,
+            )
         )
-
-
+        _prune_state(state, trim_history=False)
+        await _maybe_compress_history(state)
+    _prune_state(state)
 
 
 history_collector = on_message(priority=99, block=False)
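`_append_history` becomes a coroutine because the append, the prune, and the awaited summarization must not interleave across concurrent handlers; the per-session `asyncio.Lock` serializes them. A toy demonstration of the pattern (stub state, no Gemini call; everything here is illustrative):

```python
import asyncio

class State:
    def __init__(self) -> None:
        self.history: list[str] = []
        self.lock = asyncio.Lock()

async def append(state: State, text: str) -> None:
    async with state.lock:
        state.history.append(text)
        await asyncio.sleep(0)   # stands in for the awaited summary call

async def main() -> None:
    state = State()
    # Concurrent appends run one at a time; no task observes a
    # half-updated history while another awaits inside the lock.
    await asyncio.gather(*(append(state, f"msg {n}") for n in range(5)))
    print(state.history)

asyncio.run(main())
```

Note that the final `_prune_state(state)` in the diff runs outside the lock, so the normal trim-to-`history_max_messages` never blocks a pending append.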
@@ -1265,7 +1592,7 @@ async def _collect_history(event: MessageEvent):
 
     if text:
         user_name = _event_user_name(event)
-        _append_history(
+        await _append_history(
             state,
             "user",
             text,
@@ -1452,29 +1779,33 @@ async def _resolve_image_url(
 
 def _collect_context_messages(
     state: SessionState,
-    current_user_id: str,
     *,
     ts: float,
     limit: int,
     future: bool,
     current_text: str,
+    current_message_id: Optional[int],
 ) -> List[str]:
     if limit <= 0:
         return []
     texts: List[str] = []
     items = state.history if future else reversed(state.history)
     for item in items:
-        if item.role
+        if item.role not in {"user", "model"}:
             continue
-        if not item.to_bot:
+        if item.role == "user" and not item.to_bot:
             continue
         if future and item.ts <= ts:
             continue
         if not future and item.ts > ts:
             continue
-        if
+        if current_message_id is not None and item.message_id == current_message_id:
             continue
-
+        if item.role == "model":
+            name = item.user_name or _model_user_name()
+        else:
+            name = item.user_name or item.user_id
+        line = _format_context_line(item.text, name)
         texts.append(line)
         if len(texts) >= limit:
             break
@@ -1529,16 +1860,17 @@ async def _build_intent_text(
         wait_sec = 1.0
 
     ts = _event_ts(event)
-
+    current_name = _event_user_name(event)
+    current_message_id = getattr(event, "message_id", None)
     reply_text, reply_name = _extract_reply_context(event, state)
 
     prev_texts = _collect_context_messages(
         state,
-        user_id,
         ts=ts,
         limit=max_prev,
         future=False,
         current_text=text,
+        current_message_id=current_message_id if isinstance(current_message_id, int) else None,
     )
     future_texts: List[str] = []
     if max_future > 0:
@@ -1546,11 +1878,11 @@ async def _build_intent_text(
         await asyncio.sleep(wait_sec)
         future_texts = _collect_context_messages(
             state,
-            user_id,
             ts=ts,
             limit=max_future,
             future=True,
             current_text=text,
+            current_message_id=current_message_id if isinstance(current_message_id, int) else None,
         )
 
     reply_line = ""
@@ -1562,11 +1894,11 @@ async def _build_intent_text(
     )
     combined = [
         part
-        for part in [text, reply_line, *prev_texts, *future_texts]
+        for part in [_format_context_line(text, current_name), reply_line, *prev_texts, *future_texts]
         if part
     ]
     if not combined:
-        return text
+        return _format_context_line(text, current_name)
     return "\n".join(combined)
 
 
@@ -1575,17 +1907,18 @@ def _build_primary_intent_text(
     state: SessionState,
     text: str,
 ) -> str:
+    current_name = _event_user_name(event)
     reply_text, reply_name = _extract_reply_context(event, state)
     if not reply_text:
-        return text
+        return _format_context_line(text, current_name)
     if reply_text.strip() == text.strip():
-        return text
+        return _format_context_line(text, current_name)
     reply_line = (
         _format_context_line(reply_text, reply_name)
         if reply_name
         else f"回复内容: {reply_text}"
     )
-    return "\n".join([text, reply_line])
+    return "\n".join([_format_context_line(text, current_name), reply_line])
 
 
 _ALLOWED_ACTIONS = {
@@ -1625,7 +1958,7 @@ def _normalize_intent(
     intent: Optional[dict],
     has_image: bool,
     has_reply_image: bool,
-
+    at_users: List[str],
     state: SessionState,
 ) -> Optional[dict]:
     if not isinstance(intent, dict):
@@ -1657,7 +1990,7 @@ def _normalize_intent(
         target = "message_image"
     elif has_reply_image:
         target = "reply_image"
-    elif
+    elif at_users:
         target = "at_user"
     elif state.last_image_url:
         target = "last_image"
@@ -1751,7 +2084,7 @@ async def _build_travel_plan_reply(
     if cleaned_instruction and cleaned_instruction not in summary:
         summary = f"{summary} 需求:{cleaned_instruction}"
     user_name = _event_user_name(event)
-    _append_history(
+    await _append_history(
         state,
         "user",
         f"旅行规划:{summary}",
@@ -1759,7 +2092,7 @@ async def _build_travel_plan_reply(
         user_name=user_name,
         to_bot=True,
     )
-    _append_history(state, "model", reply)
+    await _append_history(state, "model", reply)
     return reply
 
 
@@ -1772,21 +2105,24 @@ async def _dispatch_intent(
     *,
     image_url: Optional[str],
     reply_image_url: Optional[str],
-
+    at_users: List[str],
     send_func,
 ) -> None:
+    # Intent dispatch: route each action to its handler chain
     action = str(intent.get("action", "ignore")).lower()
     if action == "ignore":
         return
     user_name = _event_user_name(event)
+    at_user = at_users[0] if at_users else None
 
     if action == "chat":
+        # Plain text chat
         prompt = intent.get("instruction")
         try:
             reply = await _call_gemini_text(str(prompt), state)
             if not reply:
                 return
-            _append_history(
+            await _append_history(
                 state,
                 "user",
                 str(prompt),
@@ -1794,7 +2130,7 @@ async def _dispatch_intent(
                 user_name=user_name,
                 to_bot=True,
             )
-            _append_history(state, "model", reply)
+            await _append_history(state, "model", reply)
             await _send_text_response(bot, event, send_func, reply)
             _mark_handled_request(state, event, text)
         except Exception as exc:
@@ -1802,6 +2138,7 @@ async def _dispatch_intent(
         return
 
     if action == "weather":
+        # Weather lookup
         query = str(intent.get("instruction") or "").strip()
         if not query:
             await send_func("请告诉我城市或地区,例如:天气 北京")
@@ -1812,7 +2149,7 @@ async def _dispatch_intent(
         if not messages:
             return
         reply_text = "\n".join(messages)
-        _append_history(
+        await _append_history(
             state,
             "user",
             f"天气:{query}",
@@ -1820,7 +2157,7 @@ async def _dispatch_intent(
             user_name=user_name,
             to_bot=True,
         )
-        _append_history(state, "model", reply_text)
+        await _append_history(state, "model", reply_text)
         for msg in messages:
             await send_func(msg)
         _mark_handled_request(state, event, text)
@@ -1830,6 +2167,7 @@ async def _dispatch_intent(
         return
 
     if action == "travel_plan":
+        # Travel planning
        params = _intent_params(intent)
         destination = params.get("destination")
         if not isinstance(destination, str) or not destination.strip():
@@ -1848,17 +2186,24 @@ async def _dispatch_intent(
         return
 
     if action == "history_clear":
+        # Clear the current session's history
        _clear_session_state(state)
         await send_func("已清除当前会话记录,可以继续聊啦。")
         return
 
     if action == "avatar_get":
+        # Fetch an avatar (sender / group / given QQ, etc.)
         target = str(intent.get("target") or "").lower()
         params = _intent_params(intent)
         if target == "qq_avatar" and not params.get("qq"):
             await send_func("请提供 QQ 号。")
             return
         await _send_transition(action, send_func)
+        if target == "at_user" and len(at_users) > 1:
+            for qq in at_users:
+                await send_func(_image_segment_from_result(_avatar_url(qq)))
+            _mark_handled_request(state, event, text)
+            return
         image_url = await _resolve_image_url(
             intent,
             event=event,
@@ -1879,12 +2224,13 @@ async def _dispatch_intent(
     params = _intent_params(intent)
 
     if action == "image_create":
-
+        # Image generation without a reference image
+        transition_text = _resolve_transition_text(action, intent)
         if transition_text:
             await send_func(transition_text)
         try:
             is_image, result = await _call_gemini_text_to_image(prompt, state)
-            _append_history(
+            await _append_history(
                 state,
                 "user",
                 f"生成图片:{prompt}",
@@ -1893,12 +2239,11 @@ async def _dispatch_intent(
                 to_bot=True,
             )
             if is_image:
-                _append_history(state, "model", "[已生成图片]")
-                await send_func("已生成图片。")
+                await _append_history(state, "model", "[已生成图片]")
                 await send_func(_image_segment_from_result(result))
                 _mark_handled_request(state, event, text)
             else:
-                _append_history(state, "model", result)
+                await _append_history(state, "model", result)
                 await send_func(f"生成结果:{result}")
                 _mark_handled_request(state, event, text)
         except Exception as exc:
@@ -1918,6 +2263,63 @@ async def _dispatch_intent(
         if target == "wait_next":
             await send_func("请在60秒内发送图片。")
 
+    if target == "at_user" and len(at_users) > 1:
+        if action == "image_chat":
+            try:
+                await _send_transition(action, send_func)
+                for qq in at_users:
+                    avatar_url = _avatar_url(qq)
+                    reply = await _call_gemini_image_chat(prompt, avatar_url, state)
+                    if not reply:
+                        continue
+                    await _append_history(
+                        state,
+                        "user",
+                        f"聊图({qq}):{prompt}",
+                        user_id=str(event.get_user_id()),
+                        user_name=user_name,
+                        to_bot=True,
+                    )
+                    await _append_history(state, "model", reply)
+                    await send_func(f"QQ {qq}:{reply}")
+                _mark_handled_request(state, event, text)
+            except UnsupportedImageError:
+                await send_func("这个格式我处理不了,发张静态图吧。")
+            except Exception as exc:
+                logger.error("NLP image chat failed: {}", _safe_error_message(exc))
+                await send_func(f"出错了:{_safe_error_message(exc)}")
+            return
+        if action == "image_generate":
+            try:
+                transition_text = _resolve_transition_text(action, intent)
+                if transition_text:
+                    await send_func(transition_text)
+                for qq in at_users:
+                    avatar_url = _avatar_url(qq)
+                    is_image, result = await _call_gemini_image(prompt, avatar_url, state)
+                    await _append_history(
+                        state,
+                        "user",
+                        f"处理头像({qq}):{prompt}",
+                        user_id=str(event.get_user_id()),
+                        user_name=user_name,
+                        to_bot=True,
+                    )
+                    if is_image:
+                        await _append_history(state, "model", "[已生成图片]")
+                        await send_func(f"QQ {qq} 已完成修改。")
+                        await send_func(_image_segment_from_result(result))
+                    else:
+                        await _append_history(state, "model", result)
+                        await send_func(f"QQ {qq} 修改结果:{result}")
+                _mark_handled_request(state, event, text)
+            except UnsupportedImageError:
+                await send_func("这个格式我处理不了,发张静态图吧。")
+            except Exception as exc:
+                logger.error("NLP image failed: {}", _safe_error_message(exc))
+                await send_func(f"出错了:{_safe_error_message(exc)}")
+            return
+
     image_url = await _resolve_image_url(
         intent,
         event=event,
@@ -1931,12 +2333,13 @@ async def _dispatch_intent(
         return
 
     if action == "image_chat":
+        # Image chat: reference image in, text-only reply out
         try:
             await _send_transition(action, send_func)
             reply = await _call_gemini_image_chat(prompt, image_url, state)
             if not reply:
                 return
-            _append_history(
+            await _append_history(
                 state,
                 "user",
                 f"聊图:{prompt}",
@@ -1944,7 +2347,7 @@ async def _dispatch_intent(
                 user_name=user_name,
                 to_bot=True,
             )
-            _append_history(state, "model", reply)
+            await _append_history(state, "model", reply)
             await _send_text_response(bot, event, send_func, reply)
             _mark_handled_request(state, event, text)
         except UnsupportedImageError:
@@ -1955,11 +2358,12 @@ async def _dispatch_intent(
         return
 
     try:
-
+        # Avatar/image processing: reference image in, may return an image or text
+        transition_text = _resolve_transition_text(action, intent)
         if transition_text:
             await send_func(transition_text)
         is_image, result = await _call_gemini_image(prompt, image_url, state)
-        _append_history(
+        await _append_history(
             state,
             "user",
             f"处理头像:{prompt}",
@@ -1968,12 +2372,11 @@ async def _dispatch_intent(
             to_bot=True,
         )
         if is_image:
-            _append_history(state, "model", "[
-            await send_func("已完成修改。")
+            await _append_history(state, "model", "[已修改图片]")
             await send_func(_image_segment_from_result(result))
             _mark_handled_request(state, event, text)
         else:
-            _append_history(state, "model", result)
+            await _append_history(state, "model", result)
             await send_func(f"修改结果:{result}")
             _mark_handled_request(state, event, text)
     except UnsupportedImageError:
@@ -2106,7 +2509,7 @@ async def _classify_intent(
     state: SessionState,
     has_image: bool,
     has_reply_image: bool,
-
+    at_users: List[str],
 ) -> Optional[dict]:
     if not config.google_api_key:
         return None
@@ -2116,7 +2519,7 @@ async def _classify_intent(
         f"文本: {text}\n"
         f"消息包含图片: {has_image}\n"
         f"回复里有图片: {has_reply_image}\n"
-        f"是否@用户: {bool(
+        f"是否@用户: {bool(at_users)}\n"
         f"是否有最近图片: {bool(state.last_image_url)}\n"
     )
     config_obj, system_used = _build_generate_config(
@@ -2161,7 +2564,7 @@ async def _handle_natural_language(bot: Bot, event: MessageEvent):
     if _is_duplicate_request(state, event, text):
         return
     image_url = _extract_first_image_url(event.get_message())
-
+    at_users = _extract_at_users(event.get_message(), event.self_id)
     reply_image_url = _extract_reply_image_url(event, state)
     has_image = image_url is not None
     has_reply_image = reply_image_url is not None
@@ -2169,22 +2572,22 @@ async def _handle_natural_language(bot: Bot, event: MessageEvent):
     try:
         primary_text = _build_primary_intent_text(event, state, text)
         intent_raw = await _classify_intent(
-            primary_text, state, has_image, has_reply_image,
+            primary_text, state, has_image, has_reply_image, at_users
         )
     except Exception as exc:
         logger.error("Intent classify failed: {}", _safe_error_message(exc))
         return
 
-    intent = _normalize_intent(intent_raw, has_image, has_reply_image,
+    intent = _normalize_intent(intent_raw, has_image, has_reply_image, at_users, state)
     if not intent:
         try:
             intent_text = await _build_intent_text(event, state, text)
             if intent_text and intent_text != primary_text:
                 intent_raw = await _classify_intent(
-                    intent_text, state, has_image, has_reply_image,
+                    intent_text, state, has_image, has_reply_image, at_users
                 )
                 intent = _normalize_intent(
-                    intent_raw, has_image, has_reply_image,
+                    intent_raw, has_image, has_reply_image, at_users, state
                 )
         except Exception as exc:
             logger.error("Intent classify failed: {}", _safe_error_message(exc))
@@ -2205,7 +2608,7 @@ async def _handle_natural_language(bot: Bot, event: MessageEvent):
         text,
         image_url=image_url,
         reply_image_url=reply_image_url,
-
+        at_users=at_users,
         send_func=nlp_handler.send,
     )
@@ -2223,19 +2626,19 @@ async def _handle_command_via_intent(
     session_id = _session_id(event)
     state = _get_state(session_id)
     image_url = _extract_first_image_url(event.get_message())
-
+    at_users = _extract_at_users(event.get_message(), event.self_id)
     reply_image_url = _extract_reply_image_url(event, state)
     has_image = image_url is not None
     has_reply_image = reply_image_url is not None
     try:
         intent_raw = await _classify_intent(
-            text, state, has_image, has_reply_image,
+            text, state, has_image, has_reply_image, at_users
         )
     except Exception as exc:
         logger.error("Intent classify failed: {}", _safe_error_message(exc))
         await send_func("意图解析失败,请稍后再试。")
         return
-    intent = _normalize_intent(intent_raw, has_image, has_reply_image,
+    intent = _normalize_intent(intent_raw, has_image, has_reply_image, at_users, state)
     if not intent:
         await send_func(_clarify_intent_text(has_image))
         return
@@ -2251,7 +2654,7 @@ async def _handle_command_via_intent(
         text,
         image_url=image_url,
         reply_image_url=reply_image_url,
-
+        at_users=at_users,
         send_func=send_func,
     )
{nonebot_plugin_skills-0.1.2 → nonebot_plugin_skills-0.2.0}/nonebot_plugin_skills/config.py
@@ -14,6 +14,12 @@ class Config(BaseModel):
     image_timeout: float = 120.0
     history_ttl_sec: int = 600
     history_max_messages: int = 10
+    history_compress_enable: bool = True
+    history_compress_trigger: int = 20
+    history_compress_keep: int = 6
+    history_compress_min_messages: int = 6
+    history_compress_max_chars: int = 600
+    history_reference_only: bool = True
     forward_line_threshold: int = 8
     message_send_delay_sec: float = 0.6
     gemini_log_response: bool = False
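For completeness, the new fields and their defaults can be exercised directly. A sketch instantiating an abridged copy of the model, assuming pydantic as the plugin already uses (only the fields added in this release are shown):

```python
from pydantic import BaseModel

class Config(BaseModel):
    # Abridged to the settings introduced in 0.2.0.
    history_compress_enable: bool = True
    history_compress_trigger: int = 20
    history_compress_keep: int = 6
    history_compress_min_messages: int = 6
    history_compress_max_chars: int = 600
    history_reference_only: bool = True

print(Config())  # the defaults match the README's .env example above
```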
{nonebot_plugin_skills-0.1.2 → nonebot_plugin_skills-0.2.0}/LICENSE
File without changes