ultra-memory 3.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CLAWHUB.md +190 -0
- package/LICENSE +21 -0
- package/README.md +195 -0
- package/SKILL.md +383 -0
- package/package.json +107 -0
- package/platform/SYSTEM_PROMPT.md +184 -0
- package/platform/__pycache__/server.cpython-313.pyc +0 -0
- package/platform/openapi.yaml +305 -0
- package/platform/server.py +454 -0
- package/platform/tools_gemini.json +176 -0
- package/platform/tools_openai.json +207 -0
- package/scripts/__pycache__/cleanup.cpython-313.pyc +0 -0
- package/scripts/__pycache__/export.cpython-313.pyc +0 -0
- package/scripts/__pycache__/extract_entities.cpython-313.pyc +0 -0
- package/scripts/__pycache__/init.cpython-313.pyc +0 -0
- package/scripts/__pycache__/log_op.cpython-313.pyc +0 -0
- package/scripts/__pycache__/recall.cpython-313.pyc +0 -0
- package/scripts/__pycache__/restore.cpython-313.pyc +0 -0
- package/scripts/__pycache__/summarize.cpython-313.pyc +0 -0
- package/scripts/cleanup.py +156 -0
- package/scripts/export.py +158 -0
- package/scripts/extract_entities.py +289 -0
- package/scripts/init.py +243 -0
- package/scripts/log_op.py +328 -0
- package/scripts/mcp-server.js +341 -0
- package/scripts/recall.py +683 -0
- package/scripts/restore.py +267 -0
- package/scripts/summarize.py +389 -0
|
@@ -0,0 +1,267 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
ultra-memory: 会话恢复脚本
|
|
4
|
+
在新会话开始时,自动加载上次会话的上下文并注入提示
|
|
5
|
+
优化:自然语言总结 + 任务完成状态识别 + 继续/下阶段建议
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import os
|
|
9
|
+
import sys
|
|
10
|
+
import json
|
|
11
|
+
import argparse
|
|
12
|
+
from datetime import datetime, timezone
|
|
13
|
+
from pathlib import Path
|
|
14
|
+
|
|
15
|
+
# Force UTF-8 on both standard streams so the CJK text this script prints
# is not mangled by a non-UTF-8 default console encoding.
for _stream in (sys.stdout, sys.stderr):
    if _stream.encoding != "utf-8":
        _stream.reconfigure(encoding="utf-8")
|
|
19
|
+
|
|
20
|
+
ULTRA_MEMORY_HOME = Path(os.environ.get("ULTRA_MEMORY_HOME", Path.home() / ".ultra-memory"))
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
def find_latest_session(project: str):
    """Return ``(meta, session_dir)`` of the newest session for *project*.

    Scans every directory under ``ULTRA_MEMORY_HOME/sessions`` and matches on
    the ``project`` field of each ``meta.json``; the newest ``started_at``
    wins.  Returns ``None`` when no session matches.

    Fix: a corrupt or unreadable ``meta.json`` in ONE session no longer
    aborts the restore for the whole project — it is skipped; sessions
    missing ``started_at`` sort last instead of raising ``KeyError``.
    """
    sessions_dir = ULTRA_MEMORY_HOME / "sessions"
    if not sessions_dir.exists():
        return None
    candidates = []
    for d in sessions_dir.iterdir():
        if not d.is_dir():
            continue
        meta_file = d / "meta.json"
        if not meta_file.exists():
            continue
        try:
            with open(meta_file, encoding="utf-8") as f:
                meta = json.load(f)
        except (OSError, json.JSONDecodeError):
            # One damaged session must not break restore for the project.
            continue
        if meta.get("project") == project:
            candidates.append((meta, d))
    if not candidates:
        return None
    # Newest first; tolerate a missing "started_at" (sorts last).
    candidates.sort(key=lambda item: item[0].get("started_at", ""), reverse=True)
    return candidates[0]
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
def load_recent_ops(session_dir: Path, n: int = 10) -> list[dict]:
    """Return the last *n* ops from ops.jsonl (compressed ones included).

    Blank and non-JSON lines are skipped silently.
    """
    ops_path = session_dir / "ops.jsonl"
    if not ops_path.exists():
        return []
    records: list[dict] = []
    for raw in ops_path.read_text(encoding="utf-8").splitlines():
        raw = raw.strip()
        if not raw:
            continue
        try:
            records.append(json.loads(raw))
        except json.JSONDecodeError:
            continue
    return records[-n:]
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
def load_summary(session_dir: Path) -> str:
    """Return the last ``---``-separated block of summary.md, or ``""``.

    Only the most recent summary block is wanted for restore, so everything
    before the final separator is discarded.
    """
    path = session_dir / "summary.md"
    if not path.exists():
        return ""
    text = path.read_text(encoding="utf-8")
    # str.split always yields at least one element, so [-1] is safe; the
    # fallback branch mirrors the original defensive shape.
    segments = text.split("---")
    return segments[-1].strip() if segments else text.strip()
|
|
71
|
+
|
|
72
|
+
|
|
73
|
+
def detect_completion_status(meta: dict, session_dir: Path) -> tuple[bool, str]:
    """Heuristically decide whether the previous task finished.

    Rule (as documented by the original author): if milestones make up more
    than 30% of all ops AND the very last op is a milestone, the task is
    considered complete; otherwise it is in progress.

    Fix: ``last_op_type`` is now updated for EVERY parsed op, not only for
    milestone ops, so the "last op is a milestone" condition actually tests
    the last operation.  The unused local ``last_milestone`` was removed.

    Returns:
        (is_complete, status_desc) — status_desc is a human-readable
        Chinese description.
    """
    op_count = meta.get("op_count", 0)

    # Count milestones and remember the type of the last op seen.
    milestone_count = 0
    last_op_type = ""
    ops_file = session_dir / "ops.jsonl"
    if ops_file.exists():
        with open(ops_file, encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                if not line:
                    continue
                try:
                    op = json.loads(line)
                except json.JSONDecodeError:
                    continue
                if op.get("type") == "milestone" or "milestone" in op.get("tags", []):
                    milestone_count += 1
                # Track the last op of ANY type for the heuristic below.
                last_op_type = op.get("type", "")

    if op_count == 0:
        return False, "未开始"

    milestone_ratio = milestone_count / max(op_count, 1)

    if milestone_ratio > 0.3 and last_op_type == "milestone":
        return True, f"已完成({milestone_count} 个里程碑,共 {op_count} 步操作)"
    elif milestone_count > 0:
        return False, f"进行中(已达成 {milestone_count} 个里程碑,共 {op_count} 步)"
    else:
        return False, f"进行中(共 {op_count} 步操作,暂无里程碑)"
|
|
115
|
+
|
|
116
|
+
|
|
117
|
+
def generate_natural_language_summary(
    project: str,
    started_at: str,
    last_milestone: str,
    is_complete: bool,
    status_desc: str,
    recent_ops: list[dict],
) -> str:
    """Build a one-line (~50 char) Chinese resume hint for the new session."""
    when = started_at[:10]

    # Pick the most informative recent action, truncated to 20 chars.
    if last_milestone:
        deed = last_milestone[:20]
    else:
        deed = (recent_ops[-1].get("summary", "进行相关操作") if recent_ops else "进行操作")[:20]

    tail = f"完成了{deed},任务已结束。" if is_complete else f"进行了{deed},尚未完成。"
    return f"上次({when})在 {project} 中{tail}"
|
|
141
|
+
|
|
142
|
+
|
|
143
|
+
def generate_continuation_advice(
    is_complete: bool,
    last_milestone: str,
    recent_ops: list[dict],
    summary_content: str,
) -> str:
    """Generate a continuation / next-phase suggestion for the new session.

    If the summary carries a "💡 下一步建议" section, its first ``- `` bullet
    is preferred; otherwise the type of the last recent op drives a canned
    suggestion.

    Fix: the original contained a first extraction loop that consisted only
    of ``continue``/``pass`` — dead code that never set ``next_step``.  It
    has been removed; the working extraction below is unchanged.
    """
    # Extract the first "- " bullet that follows the 💡/下一步建议 heading.
    next_step = ""
    if summary_content:
        in_next_section = False
        for line in summary_content.split("\n"):
            if "下一步建议" in line or "💡" in line:
                in_next_section = True
                continue
            if in_next_section and line.startswith("- "):
                next_step = line[2:].strip()
                break
            if in_next_section and line.startswith("##"):
                break

    if is_complete:
        if last_milestone:
            return f"上阶段已完成:{last_milestone[:30]}。建议开启新阶段或确认验收。"
        return "上次任务已完成,建议进行收尾验证或开启新阶段。"
    else:
        if next_step:
            return f"建议继续:{next_step[:40]}"
        if recent_ops:
            last = recent_ops[-1]
            op_type = last.get("type", "")
            summary = last.get("summary", "")[:30]
            if op_type == "error":
                return f"上次遇到错误:{summary},建议先排查此问题。"
            elif op_type == "file_write":
                return "上次写入了文件,建议运行测试验证结果。"
            elif op_type == "bash_exec":
                return "上次执行了命令,建议确认结果后继续。"
            else:
                return f"建议从上次中断处继续:{summary}"
        return "建议回顾摘要后继续未完成的任务。"
|
|
194
|
+
|
|
195
|
+
|
|
196
|
+
def restore(project: str, verbose: bool = False):
    """Locate the newest session for *project* and print a resume context.

    Output, in order: session metadata, a one-line natural-language summary,
    the key lines of the last summary block, the recent ops (in verbose mode
    or when the task is unfinished), a continuation suggestion, and two
    machine-readable trailer lines (SESSION_ID / TASK_STATUS) for the caller
    to parse.
    """
    result = find_latest_session(project)
    if not result:
        print(f"[ultra-memory] 未找到项目 '{project}' 的历史会话,将从头开始")
        return

    meta, session_dir = result
    session_id = meta["session_id"]
    started_at = meta["started_at"][:10]
    op_count = meta.get("op_count", 0)
    last_milestone = meta.get("last_milestone", "")

    # Heuristically detect whether the previous task finished.
    is_complete, status_desc = detect_completion_status(meta, session_dir)

    # Load the last summary block and the most recent operations.
    summary = load_summary(session_dir)
    recent_ops = load_recent_ops(session_dir, n=5)

    # One-line natural-language summary (~50 chars).
    nl_summary = generate_natural_language_summary(
        project, started_at, last_milestone, is_complete, status_desc, recent_ops
    )

    # Continuation / next-phase suggestion.
    advice = generate_continuation_advice(is_complete, last_milestone, recent_ops, summary)

    # Print the restore context header.
    print(f"\n[ULTRA-MEMORY RESTORE] 找到上次会话:")
    print(f" 会话 ID : {session_id}")
    print(f" 项目 : {project}")
    print(f" 时间 : {started_at}")
    print(f" 操作数 : {op_count} 条")
    print(f" 状态 : {status_desc}")
    if last_milestone:
        print(f" 最后里程碑: {last_milestone}")

    # Natural-language one-liner (meant to be injected directly as context).
    print(f"\n💬 {nl_summary}")

    # Show only the key lines of the summary (headings, markers, short bullets).
    if summary:
        print(f"\n--- 上次会话摘要(关键部分)---")
        for line in summary.split("\n"):
            if any(kw in line for kw in ["##", "✅", "🔄", "⚠️", "🔑", "💡"]):
                print(line)
            elif line.startswith("- ") and len(line) < 100:
                print(f" {line}")
        print("---")

    # Recent ops: shown in verbose mode, or whenever the task is unfinished.
    if recent_ops and (verbose or not is_complete):
        print(f"\n最近 {len(recent_ops)} 条操作:")
        for op in recent_ops:
            ts = op["ts"][11:16]
            marker = "✅" if op["type"] == "milestone" else " "
            print(f" {marker} [{ts}] #{op['seq']} {op['type']}: {op['summary'][:60]}")

    # Continuation advice.
    print(f"\n📌 {advice}")

    print(f"\n[ultra-memory] ✅ 上下文恢复完成")
    print(f"[ultra-memory] SESSION_ID={session_id}")
    print(f"[ultra-memory] TASK_STATUS={'complete' if is_complete else 'in_progress'}")
|
|
260
|
+
|
|
261
|
+
|
|
262
|
+
if __name__ == "__main__":
    # CLI entry point: `python restore.py --project NAME [--verbose]`.
    parser = argparse.ArgumentParser(description="恢复上次会话上下文")
    parser.add_argument("--project", default="default", help="项目名称")
    parser.add_argument("--verbose", action="store_true", help="显示详细操作记录")
    args = parser.parse_args()
    restore(args.project, args.verbose)
|
|
@@ -0,0 +1,389 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
ultra-memory: 会话摘要压缩脚本
|
|
4
|
+
将 ops.jsonl 中的操作日志压缩为结构化 summary.md
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import os
|
|
8
|
+
import sys
|
|
9
|
+
import json
|
|
10
|
+
import argparse
|
|
11
|
+
from datetime import datetime, timezone
|
|
12
|
+
from pathlib import Path
|
|
13
|
+
from collections import Counter
|
|
14
|
+
|
|
15
|
+
# Force UTF-8 on both standard streams so the CJK text this script prints
# is not mangled by a non-UTF-8 default console encoding.
for _stream in (sys.stdout, sys.stderr):
    if _stream.encoding != "utf-8":
        _stream.reconfigure(encoding="utf-8")
|
|
19
|
+
|
|
20
|
+
ULTRA_MEMORY_HOME = Path(os.environ.get("ULTRA_MEMORY_HOME", Path.home() / ".ultra-memory"))
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
def load_ops(session_dir: Path, only_uncompressed: bool = True) -> list[dict]:
    """Read ops from ops.jsonl, optionally skipping already-compressed ones.

    Fix: a blank or non-JSON line (e.g. a truncated write) is now skipped
    instead of raising and aborting the whole summarization — consistent
    with the JSONDecodeError handling used by restore.py's loaders.
    """
    ops_file = session_dir / "ops.jsonl"
    if not ops_file.exists():
        return []
    ops = []
    with open(ops_file, encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            try:
                op = json.loads(line)
            except json.JSONDecodeError:
                # Tolerate a corrupt line rather than crash the pipeline.
                continue
            if only_uncompressed and op.get("compressed"):
                continue
            ops.append(op)
    return ops
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
def mark_compressed(session_dir: Path, up_to_seq: int):
    """Flag every op with seq <= *up_to_seq* as compressed.

    Records are rewritten in place (via a temp file + atomic replace), never
    deleted — the raw log is preserved.
    """
    ops_file = session_dir / "ops.jsonl"
    tmp_file = session_dir / "ops.jsonl.tmp"
    with open(ops_file, encoding="utf-8") as src, open(tmp_file, "w", encoding="utf-8") as dst:
        for raw in src:
            raw = raw.strip()
            if not raw:
                continue
            record = json.loads(raw)
            if record["seq"] <= up_to_seq:
                record["compressed"] = True
            dst.write(json.dumps(record, ensure_ascii=False) + "\n")
    tmp_file.replace(ops_file)
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
def group_by_tag(ops: list[dict]) -> dict[str, list]:
    """Bucket ops by tag; ops with no tags fall into the "general" bucket."""
    buckets: dict[str, list] = {}
    for op in ops:
        tags = op.get("tags") or ["general"]
        for tag in tags:
            bucket = buckets.get(tag)
            if bucket is None:
                bucket = buckets[tag] = []
            bucket.append(op)
    return buckets
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
def extract_milestones(ops: list[dict]) -> list[dict]:
    """Ops that are milestones — either by type or by a "milestone" tag."""
    picked = []
    for op in ops:
        if op["type"] == "milestone" or "milestone" in op.get("tags", []):
            picked.append(op)
    return picked
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
def extract_errors(ops: list[dict]) -> list[dict]:
    """Ops flagged as errors — either by type or by an "error" tag."""
    picked = []
    for op in ops:
        if op["type"] == "error" or "error" in op.get("tags", []):
            picked.append(op)
    return picked
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
def extract_decisions(ops: list[dict]) -> list[dict]:
    """Ops whose type is exactly "decision" (tags are not consulted here)."""
    return list(filter(lambda op: op["type"] == "decision", ops))
|
|
74
|
+
|
|
75
|
+
|
|
76
|
+
def extract_file_changes(ops: list[dict]) -> list[str]:
    """Unique file paths touched by file_write ops, in first-seen order.

    Fix: de-duplication used ``path not in seen`` on a list — O(n) per
    lookup, O(n²) overall.  A companion set makes membership O(1) while the
    list preserves insertion order; the result is unchanged.
    """
    seen: set[str] = set()
    ordered: list[str] = []
    for op in ops:
        if op["type"] != "file_write":
            continue
        path = op.get("detail", {}).get("path", "")
        if path and path not in seen:
            seen.add(path)
            ordered.append(path)
    return ordered
|
|
84
|
+
|
|
85
|
+
|
|
86
|
+
def infer_in_progress(ops: list[dict]) -> list[dict]:
    """Infer the "currently in progress" work.

    Returns the last 5 ops that are neither milestones nor errors,
    matching by type OR by tag.

    Fix: the original excluded ops tagged "milestone" but not ops tagged
    "error", contradicting its own docstring ("非 milestone、非 error") and
    the tag-aware matching used by extract_errors.  Both tags are now
    filtered symmetrically.
    """
    candidates = [
        op for op in ops
        if op["type"] not in ("milestone", "error")
        and "milestone" not in op.get("tags", [])
        and "error" not in op.get("tags", [])
    ]
    # [-5:] on an empty list is already [], so no separate guard is needed.
    return candidates[-5:]
|
|
99
|
+
|
|
100
|
+
|
|
101
|
+
def infer_next_step(ops: list[dict], in_progress: list[dict]) -> str:
    """Suggest the next action from the type of the last in-progress op.

    Rules:
    - bash_exec   → verify the command's result
    - file_write  → write/run tests
    - error       → investigate the error
    - reasoning / decision → start implementing
    - file_read   → proceed with the follow-up edit
    - anything else → continue the current work
    """
    if not in_progress:
        return "继续当前任务"

    tail = in_progress[-1]
    kind = tail.get("type", "")
    summary = tail.get("summary", "")

    # Dispatch table of suggestion builders, keyed by op type.
    suggestions = {
        "bash_exec": lambda: f"验证上一条命令的执行结果,确认 {summary[:30]} 生效",
        "file_write": lambda: "为刚写入的文件编写或运行测试",
        "error": lambda: f"排查错误:{summary[:40]}",
        "reasoning": lambda: f"根据决策开始实现:{summary[:40]}",
        "decision": lambda: f"根据决策开始实现:{summary[:40]}",
        "file_read": lambda: "基于已读取的内容进行下一步修改",
    }
    build = suggestions.get(kind, lambda: f"继续:{summary[:40]}")
    return build()
|
|
130
|
+
|
|
131
|
+
|
|
132
|
+
def generate_summary_md(session_id: str, ops: list[dict], meta: dict) -> str:
    """Render the given (uncompressed) ops into one markdown summary block.

    Sections, in order: milestones, in-progress items, next-step suggestion,
    touched files (capped at 20), key decisions (+ rationale), errors,
    op-type/tag statistics, and the seq range this block covers.
    """
    now = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M")
    project = meta.get("project", "default")
    op_range = f"第 {ops[0]['seq']}-{ops[-1]['seq']} 条" if ops else "无"

    milestones = extract_milestones(ops)
    errors = extract_errors(ops)
    decisions = extract_decisions(ops)
    file_changes = extract_file_changes(ops)
    in_progress = infer_in_progress(ops)
    next_step = infer_next_step(ops, in_progress)

    # Distribution of op types / tags, for the statistics section below.
    type_counts = Counter(op["type"] for op in ops)
    tag_counts = Counter(tag for op in ops for tag in op.get("tags", []))

    lines = [
        f"# 会话摘要 — {session_id}",
        f"更新时间: {now} UTC",
        f"项目: {project}",
        "",
    ]

    # Completed milestones.
    if milestones:
        lines.append("## ✅ 已完成里程碑")
        for m in milestones:
            ts = m["ts"][:16].replace("T", " ")
            lines.append(f"- [{ts}] {m['summary']}")
        lines.append("")

    # Currently in progress (helps a cross-day restore locate state quickly).
    if in_progress:
        lines.append("## 🔄 当前进行中")
        for op in in_progress:
            ts = op["ts"][11:16]
            lines.append(f"- [ ] [{ts}] {op['summary'][:80]}")
        lines.append("")

    # Next-step suggestion (lets the assistant act immediately after restore).
    lines.append("## 💡 下一步建议")
    lines.append(f"- {next_step}")
    lines.append("")

    # Touched files.
    if file_changes:
        lines.append("## 📁 涉及文件")
        for path in file_changes[:20]:
            lines.append(f"- `{path}`")
        lines.append("")

    # Key decisions, each with its rationale when recorded.
    if decisions:
        lines.append("## 🔑 关键决策")
        for d in decisions:
            lines.append(f"- {d['summary']}")
            detail = d.get("detail", {})
            if detail.get("rationale"):
                lines.append(f" - 依据: {detail['rationale']}")
        lines.append("")

    # Errors and how they were handled.
    if errors:
        lines.append("## ⚠️ 错误与处理")
        for e in errors:
            ts = e["ts"][:16].replace("T", " ")
            lines.append(f"- [{ts}] {e['summary']}")
        lines.append("")

    # Operation statistics.
    lines.append("## 📊 操作统计")
    for op_type, count in type_counts.most_common():
        lines.append(f"- {op_type}: {count} 次")
    if tag_counts:
        top_tags = [t for t, _ in tag_counts.most_common(5)]
        lines.append(f"- 主要领域: {', '.join(top_tags)}")
    lines.append("")

    # Range of ops covered by this block (reference for the next restore).
    lines.append("## 📋 操作日志范围")
    lines.append(f"ops.jsonl {op_range}(已压缩,原始记录保留)")
    lines.append("")

    return "\n".join(lines)
|
|
216
|
+
|
|
217
|
+
|
|
218
|
+
# summary.md 触发元压缩的字符阈值(约 3000 字符 ≈ 5~6 个压缩块)
|
|
219
|
+
META_SUMMARY_THRESHOLD = 3000
|
|
220
|
+
|
|
221
|
+
# 重要操作类型:压缩时保留原文
|
|
222
|
+
HIGH_IMPORTANCE_TYPES = {"milestone", "decision", "error"}
|
|
223
|
+
|
|
224
|
+
|
|
225
|
+
def build_meta_summary_block(blocks: list[str]) -> str:
    """Second-stage compression: fold several summary blocks into one [META] block.

    Keeps only milestones, decisions, errors and next-step suggestions;
    per-block op statistics are accumulated into totals.  The meta block is
    considerably more compact than the blocks it replaces (roughly 5x).

    Fix: timestamped bullets were recognized by a hard-coded prefix list
    ``"[09".."[23"``, which silently dropped any in-progress line logged
    between 00:00 and 08:59.  A ``\\[\\d{2}`` regex now matches any
    two-digit-after-bracket timestamp (it also still matches the
    ``[2024-…]`` date-prefixed milestone/error lines).
    """
    now = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M")

    key_lines: list[str] = []
    op_counts: Counter = Counter()
    for block in blocks:
        for line in block.splitlines():
            stripped = line.strip()
            if not stripped:
                continue
            # Keep milestone / decision / error / suggestion lines, plus any
            # timestamped bullet.
            if (any(m in stripped for m in ("✅", "🔑", "⚠️", "💡", "- [x]"))
                    or re.search(r"\[\d{2}", stripped)):
                key_lines.append(f" {stripped}")
            # Accumulate "- <type>: <n> 次" statistics across blocks.
            m = re.match(r"- (\w+): (\d+) 次", stripped)
            if m:
                op_counts[m.group(1)] += int(m.group(2))

    lines = [
        f"# [META] 历史摘要 — 压缩自 {len(blocks)} 个摘要块",
        f"压缩时间: {now} UTC",
        f"(此块包含多次会话的核心信息,细节见各会话 summary.md)",
        "",
    ]
    if key_lines:
        lines.append("## 历史关键事件(里程碑/决策/错误)")
        lines.extend(key_lines[:30])  # cap at 30 key lines
        lines.append("")
    if op_counts:
        lines.append("## 历史操作总量")
        for op_type, count in op_counts.most_common():
            lines.append(f"- {op_type}: 累计 {count} 次")
        lines.append("")

    return "\n".join(lines)
|
|
270
|
+
|
|
271
|
+
|
|
272
|
+
def maybe_meta_compress(session_dir: Path, summary_file: Path):
    """Meta-compress summary.md once it grows past META_SUMMARY_THRESHOLD.

    The older blocks are replaced by a single [META] block; the newest two
    blocks are kept verbatim.

    Layered-memory scheme:
        ops.jsonl → [50 ops] → summary block   (one block per batch of ops)
        summary block × N → [META] block       (merged when the file is big)
        [META] blocks can themselves be merged again (unbounded depth)
    """
    if not summary_file.exists():
        return

    content = summary_file.read_text(encoding="utf-8")
    if len(content) < META_SUMMARY_THRESHOLD:
        return  # below the size threshold; nothing to do

    # Split into blocks on the "---" separators.
    raw_blocks = [b.strip() for b in content.split("---") if b.strip()]
    if len(raw_blocks) < 4:
        return  # too few blocks to be worth compressing

    # Keep the newest 2 blocks intact; meta-compress everything older.
    blocks_to_compress = raw_blocks[:-2]
    blocks_to_keep = raw_blocks[-2:]

    meta_block = build_meta_summary_block(blocks_to_compress)

    new_content = meta_block + "\n\n---\n\n" + "\n\n---\n\n".join(blocks_to_keep)
    summary_file.write_text(new_content, encoding="utf-8")

    saved_chars = len(content) - len(new_content)
    print(f"[ultra-memory] 🗜️ 元压缩完成:{len(blocks_to_compress)} 块 → 1 个 [META] 块,"
          f"节省 {saved_chars} 字符({saved_chars*100//max(len(content),1)}%)")
|
|
306
|
+
|
|
307
|
+
|
|
308
|
+
import re
|
|
309
|
+
|
|
310
|
+
|
|
311
|
+
def summarize(session_id: str, force: bool = False):
    """Compress a session's uncompressed ops into a summary.md block.

    Pipeline: load uncompressed ops → render a markdown block → append it to
    summary.md → mark those ops compressed → update meta.json → sync the
    Layer 3 index → meta-compress summary.md if it has grown too large.

    With fewer than 10 uncompressed ops, nothing happens unless *force*.
    """
    session_dir = ULTRA_MEMORY_HOME / "sessions" / session_id
    if not session_dir.exists():
        print(f"[ultra-memory] ❌ 会话不存在: {session_id}")
        return

    meta_file = session_dir / "meta.json"
    meta = {}
    if meta_file.exists():
        with open(meta_file, encoding="utf-8") as f:
            meta = json.load(f)

    ops = load_ops(session_dir, only_uncompressed=True)
    if len(ops) < 10 and not force:
        print(f"[ultra-memory] ⏭️ 操作条数不足({len(ops)} 条),跳过压缩(用 --force 强制执行)")
        return

    if not ops:
        print("[ultra-memory] 无新操作需要压缩")
        return

    summary_content = generate_summary_md(session_id, ops, meta)
    summary_file = session_dir / "summary.md"

    # Append to any existing summary (earlier compression history is kept).
    mode = "a" if summary_file.exists() else "w"
    if mode == "a":
        summary_content = "\n---\n\n" + summary_content
    with open(summary_file, mode, encoding="utf-8") as f:
        f.write(summary_content)

    # Mark everything up to the last op as compressed.
    last_seq = ops[-1]["seq"]
    mark_compressed(session_dir, last_seq)

    # Update meta.json.
    # NOTE(review): "last_milestone" is set to the LAST op's summary even
    # when that op is not a milestone — confirm this is intended, since
    # detect_completion_status in restore.py treats it as a milestone.
    meta["last_summary_at"] = datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")
    meta["last_milestone"] = ops[-1]["summary"] if ops else meta.get("last_milestone")
    with open(meta_file, "w", encoding="utf-8") as f:
        json.dump(meta, f, ensure_ascii=False, indent=2)

    # Sync milestones into the Layer 3 semantic index.
    sync_to_semantic(session_id, meta, ops)

    # Layered memory: meta-compress summary.md if it has grown too large.
    maybe_meta_compress(session_dir, summary_file)

    print(f"[ultra-memory] ✅ 摘要压缩完成,{len(ops)} 条操作 → summary.md")
    print(f"[ultra-memory] 摘要路径: {summary_file}")
|
|
360
|
+
|
|
361
|
+
|
|
362
|
+
def sync_to_semantic(session_id: str, meta: dict, ops: list[dict]):
    """Push this session's latest milestone into the Layer 3 session index.

    Silently does nothing when the index file is absent; the index is
    rewritten even when no matching session entry was found (same as the
    original behavior).
    """
    semantic_dir = ULTRA_MEMORY_HOME / "semantic"
    index_file = semantic_dir / "session_index.json"
    if not index_file.exists():
        return
    with open(index_file, encoding="utf-8") as f:
        index = json.load(f)
    for entry in index.get("sessions", []):
        if entry["session_id"] != session_id:
            continue
        milestones = extract_milestones(ops)
        if milestones:
            entry["last_milestone"] = milestones[-1]["summary"]
        break
    with open(index_file, "w", encoding="utf-8") as f:
        json.dump(index, f, ensure_ascii=False, indent=2)
|
|
378
|
+
|
|
379
|
+
|
|
380
|
+
def extract_milestones(ops):
    # NOTE(review): duplicate of the annotated extract_milestones defined
    # earlier in this module; being the later definition, it shadows that one
    # at import time. The bodies look identical — consider deleting one copy.
    return [op for op in ops if op["type"] == "milestone" or "milestone" in op.get("tags", [])]
|
|
382
|
+
|
|
383
|
+
|
|
384
|
+
if __name__ == "__main__":
    # CLI entry point: `python summarize.py --session ID [--force]`.
    parser = argparse.ArgumentParser(description="触发会话摘要压缩")
    parser.add_argument("--session", required=True, help="会话 ID")
    parser.add_argument("--force", action="store_true", help="强制压缩(即使条数不足)")
    args = parser.parse_args()
    summarize(args.session, args.force)
|