@purpleraven/hits 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/AGENTS.md +298 -0
- package/LICENSE +190 -0
- package/README.md +336 -0
- package/bin/hits.js +56 -0
- package/config/schema.json +94 -0
- package/config/settings.yaml +102 -0
- package/data/dev_handover.yaml +143 -0
- package/hits_core/__init__.py +9 -0
- package/hits_core/ai/__init__.py +11 -0
- package/hits_core/ai/compressor.py +86 -0
- package/hits_core/ai/llm_client.py +65 -0
- package/hits_core/ai/slm_filter.py +126 -0
- package/hits_core/api/__init__.py +3 -0
- package/hits_core/api/routes/__init__.py +8 -0
- package/hits_core/api/routes/auth.py +211 -0
- package/hits_core/api/routes/handover.py +117 -0
- package/hits_core/api/routes/health.py +8 -0
- package/hits_core/api/routes/knowledge.py +177 -0
- package/hits_core/api/routes/node.py +121 -0
- package/hits_core/api/routes/work_log.py +174 -0
- package/hits_core/api/server.py +181 -0
- package/hits_core/auth/__init__.py +21 -0
- package/hits_core/auth/dependencies.py +61 -0
- package/hits_core/auth/manager.py +368 -0
- package/hits_core/auth/middleware.py +69 -0
- package/hits_core/collector/__init__.py +18 -0
- package/hits_core/collector/ai_session_collector.py +118 -0
- package/hits_core/collector/base.py +73 -0
- package/hits_core/collector/daemon.py +94 -0
- package/hits_core/collector/git_collector.py +177 -0
- package/hits_core/collector/hits_action_collector.py +110 -0
- package/hits_core/collector/shell_collector.py +178 -0
- package/hits_core/main.py +36 -0
- package/hits_core/mcp/__init__.py +20 -0
- package/hits_core/mcp/server.py +429 -0
- package/hits_core/models/__init__.py +18 -0
- package/hits_core/models/node.py +56 -0
- package/hits_core/models/tree.py +68 -0
- package/hits_core/models/work_log.py +64 -0
- package/hits_core/models/workflow.py +92 -0
- package/hits_core/platform/__init__.py +5 -0
- package/hits_core/platform/actions.py +225 -0
- package/hits_core/service/__init__.py +6 -0
- package/hits_core/service/handover_service.py +382 -0
- package/hits_core/service/knowledge_service.py +172 -0
- package/hits_core/service/tree_service.py +105 -0
- package/hits_core/storage/__init__.py +11 -0
- package/hits_core/storage/base.py +84 -0
- package/hits_core/storage/file_store.py +314 -0
- package/hits_core/storage/redis_store.py +123 -0
- package/hits_web/dist/assets/index-Bgx7F6m6.css +1 -0
- package/hits_web/dist/assets/index-D1B5E67G.js +3 -0
- package/hits_web/dist/index.html +16 -0
- package/package.json +60 -0
- package/requirements-core.txt +7 -0
- package/requirements.txt +1 -0
- package/run.sh +271 -0
- package/server.js +234 -0
|
@@ -0,0 +1,382 @@
|
|
|
1
|
+
"""Handover service - generates project-scoped session handover summaries.
|
|
2
|
+
|
|
3
|
+
Enables seamless context transfer between AI tools (Claude, OpenCode, etc.)
|
|
4
|
+
when token limits are reached or the user switches tools.
|
|
5
|
+
|
|
6
|
+
Key design:
|
|
7
|
+
- Project-scoped: only includes work logs for the specified project
|
|
8
|
+
- Data-driven: no LLM dependency for reliability
|
|
9
|
+
- Structured: machine-readable format that any AI can consume
|
|
10
|
+
- File-based fallback: works even when HITS server is down
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
import os
|
|
14
|
+
import subprocess
|
|
15
|
+
from datetime import datetime
|
|
16
|
+
from pathlib import Path
|
|
17
|
+
from typing import Optional
|
|
18
|
+
|
|
19
|
+
from ..storage.base import BaseStorage
|
|
20
|
+
from ..storage.file_store import FileStorage
|
|
21
|
+
from ..models.work_log import WorkLog
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class HandoverSummary:
    """Structured, project-scoped handover summary.

    Bundles recent work logs together with derived context (git state,
    key decisions, pending items, per-tool session history) so the next
    AI session can resume where the previous one stopped.
    """

    def __init__(
        self,
        project_path: str,
        project_name: str = "",
        recent_logs: Optional[list[WorkLog]] = None,
        files_modified: Optional[list[str]] = None,
        commands_run: Optional[list[str]] = None,
        key_decisions: Optional[list[str]] = None,
        pending_items: Optional[list[str]] = None,
        session_history: Optional[list[dict]] = None,
        git_branch: Optional[str] = None,
        git_status: Optional[str] = None,
        generated_at: Optional[datetime] = None,
    ):
        self.project_path = project_path
        # Fall back to the directory name when no explicit name is given.
        self.project_name = project_name or Path(project_path).name
        self.recent_logs = recent_logs or []
        self.files_modified = files_modified or []
        self.commands_run = commands_run or []
        self.key_decisions = key_decisions or []
        self.pending_items = pending_items or []
        self.session_history = session_history or []
        self.git_branch = git_branch
        self.git_status = git_status
        # NOTE(review): naive local time when not supplied — confirm callers
        # do not mix this with timezone-aware timestamps.
        self.generated_at = generated_at or datetime.now()

    def to_dict(self) -> dict:
        """Serialize to a JSON-friendly dict (machine-readable handover)."""
        serialized_logs = [
            {
                "id": entry.id,
                "performed_by": entry.performed_by,
                "performed_at": entry.performed_at.isoformat(),
                "request_text": entry.request_text,
                "context": entry.context,
                "source": entry.source,
                "tags": entry.tags,
                "result_type": entry.result_type,
            }
            for entry in self.recent_logs
        ]
        return {
            "project_path": self.project_path,
            "project_name": self.project_name,
            "generated_at": self.generated_at.isoformat(),
            "git_branch": self.git_branch,
            "git_status": self.git_status,
            "session_history": self.session_history,
            "key_decisions": self.key_decisions,
            "pending_items": self.pending_items,
            "files_modified": self.files_modified,
            "commands_run": self.commands_run,
            "recent_logs": serialized_logs,
        }

    def to_text(self) -> str:
        """Render a human-readable handover report.

        The layout is intended for:
        1. Direct copy-paste into an AI chat as context
        2. Human reading as a project status report
        3. Pasting into documents/wiki
        """
        out: list[str] = []

        def section(title: str) -> None:
            # Section header followed by a short divider line.
            out.append(title)
            out.append("-" * 30)

        # --- header -------------------------------------------------------
        out.append(f"📋 인수인계: {self.project_name}")
        out.append("=" * 40)
        out.append(f"경로: {self.project_path}")
        out.append(f"시간: {self.generated_at.strftime('%Y-%m-%d %H:%M')}")
        if self.git_branch:
            out.append(f"브랜치: {self.git_branch} ({self.git_status or '?'})")
        out.append("")

        # --- per-tool session breakdown -----------------------------------
        if self.session_history:
            section("👥 작업 이력")
            for entry in self.session_history:
                who = entry.get("performed_by", "unknown")
                n_logs = entry.get("log_count", 0)
                stamp = entry.get("last_activity", "")[:16]
                out.append(f" {who}: {n_logs}건 (마지막: {stamp})")
            out.append("")

        # --- key decisions ------------------------------------------------
        if self.key_decisions:
            section("★ 주요 결정 사항")
            for note in self.key_decisions:
                out.append(f" • {note}")
            out.append("")

        # --- pending / follow-up work -------------------------------------
        if self.pending_items:
            section("⚠ 미완료 / 후속 작업")
            for note in self.pending_items:
                out.append(f" • {note}")
            out.append("")

        # --- modified files (deduplicated, capped at 15 shown) ------------
        if self.files_modified:
            distinct = sorted(set(self.files_modified))
            section(f"📄 수정된 파일 ({len(distinct)}개)")
            out.extend(f" {path}" for path in distinct[:15])
            if len(distinct) > 15:
                out.append(f" ... 외 {len(distinct) - 15}개")
            out.append("")

        # --- most recent work (capped at 10 entries) ----------------------
        if self.recent_logs:
            section("📝 최근 작업")
            for entry in self.recent_logs[:10]:
                stamp = entry.performed_at.strftime("%m/%d %H:%M")
                text = entry.request_text or entry.context or "(내용 없음)"
                suffix = f" [{', '.join(entry.tags)}]" if entry.tags else ""
                out.append(f" [{stamp}] {entry.performed_by}: {text[:80]}{suffix}")

        # --- empty state --------------------------------------------------
        if not self.session_history and not self.recent_logs:
            out.append("기록된 작업이 없습니다.")

        return "\n".join(out)
|
|
155
|
+
|
|
156
|
+
|
|
157
|
+
class HandoverService:
    """Generate project-scoped handover summaries.

    Usage:
        service = HandoverService()

        # New AI session starts → get handover
        summary = await service.get_handover("/home/user/source/my-project")

        # Print structured text for AI context
        print(summary.to_text())

        # Or get machine-readable dict
        data = summary.to_dict()
    """

    def __init__(self, storage: Optional[BaseStorage] = None):
        self.storage = storage or FileStorage()  # defaults to ~/.hits/data/

    @staticmethod
    def _run_git(args: list[str], cwd: str) -> Optional[str]:
        """Run ``git <args>`` in *cwd* with a 5s timeout.

        Returns stripped stdout on success, or None when git is unavailable,
        times out, or exits non-zero. Never raises.
        """
        try:
            result = subprocess.run(
                ["git", *args],
                capture_output=True, text=True, timeout=5,
                cwd=cwd,
            )
        except Exception:
            return None
        if result.returncode != 0:
            return None
        return result.stdout.strip()

    def _detect_git_info(self, project_path: str) -> tuple[Optional[str], Optional[str]]:
        """Detect git branch and a short working-tree status for a project.

        Returns:
            ``(branch, status)`` — each element is None when the directory
            is not a git checkout or the corresponding git call fails.
            ``status`` is ``"clean"`` or ``"<n> changes"``.
        """
        if not (Path(project_path) / ".git").exists():
            return None, None

        branch = self._run_git(["rev-parse", "--abbrev-ref", "HEAD"], project_path)

        status = None
        short_status = self._run_git(["status", "--short"], project_path)
        if short_status is not None:
            changed = [line for line in short_status.split("\n") if line.strip()]
            status = f"{len(changed)} changes" if changed else "clean"

        return branch, status

    def _extract_key_decisions(self, logs: list[WorkLog]) -> list[str]:
        """Extract key decisions from work log contexts.

        Heuristic: logs containing decision-flavored keywords (Korean or
        English) in their context, request text, or tags are treated as key
        decisions. At most 5 distinct entries, each capped at 120 chars.
        """
        # All keywords are lowercase; matched against lowercased log text.
        decision_keywords = [
            "결정", "변경", "선택", "채택", "important", "decide",
            "아키텍처", "설계", "design", "architecture", "breaking",
            "중요", "필수",
        ]
        decisions: list[str] = []

        for log in logs:
            text = f"{log.context or ''} {log.request_text or ''} {' '.join(log.tags)}".lower()
            for kw in decision_keywords:
                if kw in text:
                    decision = log.request_text or log.context or ""
                    if decision and decision not in decisions:
                        decisions.append(decision[:120])
                    break  # one hit per log is enough
        return decisions[:5]

    def _extract_pending_items(self, logs: list[WorkLog]) -> list[str]:
        """Extract pending/incomplete items from recent logs.

        Heuristic: a log counts as pending when it carries a pending-style
        tag ('todo', 'wip', 'incomplete', 'pending') or mentions a pending
        keyword ('todo', 'fixme', '미완', ...) in its text. At most 5
        distinct entries, each capped at 120 chars.
        """
        pending_keywords = ["todo", "pending", "fixme", "미완", "필요", "남음", "wip"]
        pending_tags = {"todo", "wip", "incomplete", "pending"}
        items: list[str] = []

        for log in logs:
            # A tag match takes precedence over keyword scanning.
            tags_lower = {t.lower() for t in log.tags}
            if pending_tags & tags_lower:
                text = log.request_text or log.context or ""
                if text and text not in items:
                    items.append(text[:120])
                continue

            text = f"{log.context or ''} {log.request_text or ''}".lower()
            for kw in pending_keywords:
                if kw in text:
                    original = log.request_text or log.context or ""
                    if original and original not in items:
                        items.append(original[:120])
                    break
        return items[:5]

    def _build_session_history(self, logs: list[WorkLog]) -> list[dict]:
        """Build a per-AI-tool session breakdown, most recent activity first.

        NOTE(review): ``last_activity`` is the last log seen in iteration
        order for each performer — assumes *logs* arrive in chronological
        order; confirm against the storage backend's ordering.
        """
        sessions: dict[str, dict] = {}

        for log in logs:
            performer = log.performed_by
            if performer not in sessions:
                sessions[performer] = {
                    "performed_by": performer,
                    "log_count": 0,
                    "first_activity": log.performed_at.isoformat(),
                    "last_activity": log.performed_at.isoformat(),
                }
            sessions[performer]["log_count"] += 1
            sessions[performer]["last_activity"] = log.performed_at.isoformat()

        return sorted(sessions.values(), key=lambda x: x["last_activity"], reverse=True)

    @staticmethod
    def _aggregate_results(logs: list[WorkLog]) -> tuple[list[str], list[str]]:
        """Collect files_modified / commands_run entries from log result_data."""
        files_modified: list[str] = []
        commands_run: list[str] = []
        for log in logs:
            if log.result_data:
                files_modified.extend(log.result_data.get("files_modified", []))
                commands_run.extend(log.result_data.get("commands_run", []))
        return files_modified, commands_run

    async def get_handover(
        self,
        project_path: str,
        recent_count: int = 20,
    ) -> HandoverSummary:
        """Generate a handover summary for a specific project.

        Args:
            project_path: Absolute path identifying the project.
            recent_count: Number of recent work logs to include.

        Returns:
            HandoverSummary with all context needed for the next AI session.
        """
        # Normalize the path so lookups match the stored form.
        project_path = str(Path(project_path).resolve())

        # Get project-scoped work logs.
        logs = await self.storage.list_work_logs(
            project_path=project_path,
            limit=recent_count,
        )

        # If no logs were found with the exact path, fall back to matching
        # by project directory name across recent logs.
        if not logs:
            all_logs = await self.storage.list_work_logs(limit=50)
            project_name = Path(project_path).name
            logs = [
                log for log in all_logs
                if log.project_path and (
                    Path(log.project_path).name == project_name
                    or project_name in log.project_path
                )
            ][:recent_count]

        files_modified, commands_run = self._aggregate_results(logs)

        git_branch, git_status = self._detect_git_info(project_path)

        return HandoverSummary(
            project_path=project_path,
            recent_logs=logs,
            files_modified=files_modified,
            commands_run=commands_run,
            key_decisions=self._extract_key_decisions(logs),
            pending_items=self._extract_pending_items(logs),
            session_history=self._build_session_history(logs),
            git_branch=git_branch,
            git_status=git_status,
        )

    async def list_projects(self) -> list[dict]:
        """List all projects that have work logs.

        Returns:
            Per-project summary dicts (as produced by the storage backend),
            sorted by most recent activity first.
        """
        paths = await self.storage.list_project_paths()
        projects = [await self.storage.get_project_summary(path) for path in paths]
        return sorted(projects, key=lambda x: x.get("last_activity") or "", reverse=True)

    async def get_all_handovers(self) -> HandoverSummary:
        """Get a combined handover summary across all projects.

        Returns a merged view showing all recent activity regardless of
        project; git info is omitted since there is no single project dir.
        """
        all_logs = await self.storage.list_work_logs(limit=50)
        files_modified, commands_run = self._aggregate_results(all_logs)

        return HandoverSummary(
            project_path="all",
            project_name="전체 프로젝트",
            recent_logs=all_logs,
            files_modified=files_modified,
            commands_run=commands_run,
            key_decisions=self._extract_key_decisions(all_logs),
            pending_items=self._extract_pending_items(all_logs),
            session_history=self._build_session_history(all_logs),
        )
|
|
@@ -0,0 +1,172 @@
|
|
|
1
|
+
"""Knowledge tree CRUD service."""
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
from pathlib import Path
|
|
5
|
+
from typing import Optional
|
|
6
|
+
from dataclasses import dataclass, asdict
|
|
7
|
+
import shutil
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
@dataclass
class KnowledgeNode:
    """A single knowledge-tree item (name, layer, type, action)."""

    name: str
    layer: str = "what"
    type: str = "url"
    action: str = ""
    negative_path: bool = False

    def to_dict(self) -> dict:
        """Serialize to a plain dict; ``negative_path`` appears only when True."""
        payload = {
            "name": self.name,
            "layer": self.layer,
            "type": self.type,
            "action": self.action,
        }
        if not self.negative_path:
            return payload
        payload["negative_path"] = True
        return payload

    @classmethod
    def from_dict(cls, data: dict) -> "KnowledgeNode":
        """Deserialize from a plain dict, filling defaults for missing keys."""
        read = data.get
        return cls(
            name=read("name", ""),
            layer=read("layer", "what"),
            type=read("type", "url"),
            action=read("action", ""),
            negative_path=read("negative_path", False),
        )
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
@dataclass
class KnowledgeCategory:
    """A named group of knowledge nodes displayed under one icon."""

    name: str
    icon: str = "📁"
    # Annotated Optional because None is accepted and normalized to [] in
    # __post_init__ (a mutable [] default is illegal on a dataclass field).
    items: Optional[list] = None

    def __post_init__(self):
        # Normalize: callers may omit items or pass None explicitly.
        if self.items is None:
            self.items = []

    def to_dict(self) -> dict:
        """Serialize, converting KnowledgeNode items to plain dicts.

        Items that are already plain dicts are passed through unchanged.
        """
        return {
            "name": self.name,
            "icon": self.icon,
            "items": [item.to_dict() if isinstance(item, KnowledgeNode) else item for item in self.items],
        }

    @classmethod
    def from_dict(cls, data: dict) -> "KnowledgeCategory":
        """Build a category from a plain dict; item dicts become KnowledgeNode."""
        items = [KnowledgeNode.from_dict(item) if isinstance(item, dict) else item for item in data.get("items", [])]
        return cls(name=data.get("name", ""), icon=data.get("icon", "📁"), items=items)
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
class KnowledgeService:
    """File-backed CRUD service for knowledge categories and nodes.

    All data lives in a single JSON file; every mutation rewrites the file
    and keeps a one-deep ``.bak`` copy of the previous contents.
    """

    # Default location: <package root>/data/knowledge.json
    DATA_FILE = Path(__file__).parent.parent.parent / "data" / "knowledge.json"
    BACKUP_SUFFIX = ".bak"

    def __init__(self, data_path: Optional[Path] = None):
        if data_path:
            # Instance attribute shadows the class-level default path.
            self.DATA_FILE = Path(data_path)
        self._ensure_data_file()

    def _ensure_data_file(self) -> None:
        """Create the data file (and parent dirs) with an empty category list."""
        self.DATA_FILE.parent.mkdir(parents=True, exist_ok=True)
        if not self.DATA_FILE.exists():
            self._save_categories([])

    def _load_categories(self) -> "list[KnowledgeCategory]":
        """Read all categories; returns [] for a missing or corrupt file."""
        if not self.DATA_FILE.exists():
            return []
        try:
            with open(self.DATA_FILE, "r", encoding="utf-8") as f:
                data = json.load(f)
            return [KnowledgeCategory.from_dict(cat) for cat in data.get("categories", [])]
        except (json.JSONDecodeError, IOError):
            return []

    def _save_categories(self, categories: "list[KnowledgeCategory]") -> bool:
        """Persist categories, backing up the previous file first.

        Returns False (rather than raising) on I/O failure.
        """
        try:
            if self.DATA_FILE.exists():
                # Keep one backup generation so a bad write is recoverable.
                shutil.copy(self.DATA_FILE, str(self.DATA_FILE) + self.BACKUP_SUFFIX)
            data = {"categories": [cat.to_dict() for cat in categories]}
            with open(self.DATA_FILE, "w", encoding="utf-8") as f:
                json.dump(data, f, ensure_ascii=False, indent=2)
            return True
        except IOError:
            return False

    def list_categories(self) -> "list[KnowledgeCategory]":
        """Return all stored categories."""
        return self._load_categories()

    def get_category(self, category_name: str) -> "Optional[KnowledgeCategory]":
        """Return the category named *category_name*, or None."""
        for cat in self._load_categories():
            if cat.name == category_name:
                return cat
        return None

    def add_category(self, name: str, icon: str = "📁") -> "Optional[KnowledgeCategory]":
        """Add a new category; returns it, or None on duplicate name or save failure."""
        categories = self._load_categories()
        if any(cat.name == name for cat in categories):
            return None
        new_cat = KnowledgeCategory(name=name, icon=icon)
        categories.append(new_cat)
        if self._save_categories(categories):
            return new_cat
        return None

    def update_category(self, old_name: str, new_name: str, icon: Optional[str] = None) -> bool:
        """Rename a category and optionally change its icon.

        Returns False when *old_name* does not exist or the save fails.
        """
        categories = self._load_categories()
        for cat in categories:
            if cat.name == old_name:
                cat.name = new_name
                if icon is not None:
                    cat.icon = icon
                return self._save_categories(categories)
        return False

    def delete_category(self, category_name: str) -> bool:
        """Delete a category; False when it does not exist or the save fails."""
        categories = self._load_categories()
        new_categories = [cat for cat in categories if cat.name != category_name]
        if len(new_categories) == len(categories):
            return False
        return self._save_categories(new_categories)

    def add_node(self, category_name: str, node: "KnowledgeNode") -> bool:
        """Append *node* to the named category; False when the category is missing."""
        categories = self._load_categories()
        for cat in categories:
            if cat.name == category_name:
                cat.items.append(node)
                return self._save_categories(categories)
        return False

    def update_node(self, category_name: str, node_index: int, node: "KnowledgeNode") -> bool:
        """Replace the node at *node_index* in the named category."""
        categories = self._load_categories()
        for cat in categories:
            if cat.name == category_name:
                if 0 <= node_index < len(cat.items):
                    cat.items[node_index] = node
                    return self._save_categories(categories)
        return False

    def delete_node(self, category_name: str, node_index: int) -> bool:
        """Remove the node at *node_index* from the named category."""
        categories = self._load_categories()
        for cat in categories:
            if cat.name == category_name:
                if 0 <= node_index < len(cat.items):
                    cat.items.pop(node_index)
                    return self._save_categories(categories)
        return False

    def get_node(self, category_name: str, node_index: int) -> "Optional[KnowledgeNode]":
        """Return the node at *node_index* in the named category, or None."""
        category = self.get_category(category_name)
        if category and 0 <= node_index < len(category.items):
            return category.items[node_index]
        return None

    def find_node_index(self, category_name: str, node_name: str) -> int:
        """Return the index of the first node named *node_name*, or -1."""
        category = self.get_category(category_name)
        if category:
            for i, item in enumerate(category.items):
                if item.name == node_name:
                    return i
        return -1

    def to_config_dict(self) -> dict:
        """Export the full store as a plain ``{"categories": [...]}`` dict."""
        categories = self._load_categories()
        return {"categories": [cat.to_dict() for cat in categories]}
|
|
@@ -0,0 +1,105 @@
|
|
|
1
|
+
"""Tree management service."""
|
|
2
|
+
|
|
3
|
+
from typing import Optional
|
|
4
|
+
from ..models.tree import KnowledgeTree
|
|
5
|
+
from ..models.node import Node, NodeLayer
|
|
6
|
+
from ..models.workflow import Workflow
|
|
7
|
+
from ..storage.base import BaseStorage
|
|
8
|
+
from ..storage.file_store import FileStorage
|
|
9
|
+
from ..ai.compressor import SemanticCompressor
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class TreeService:
    """CRUD and query operations over knowledge trees.

    Thin orchestration layer: persistence is delegated to the storage
    backend and node compression to SemanticCompressor.
    """

    def __init__(self, storage: Optional[BaseStorage] = None):
        self.storage = storage or FileStorage()
        self.compressor = SemanticCompressor()

    async def create_tree(
        self,
        tree_id: str,
        name: str,
        description: Optional[str] = None,
    ) -> KnowledgeTree:
        """Create, persist, and return a new tree."""
        new_tree = KnowledgeTree(
            id=tree_id,
            name=name,
            description=description,
        )
        await self.storage.save_tree(new_tree)
        return new_tree

    async def get_tree(self, tree_id: str) -> Optional[KnowledgeTree]:
        """Load a tree by id from storage."""
        return await self.storage.load_tree(tree_id)

    async def save_tree(self, tree: KnowledgeTree) -> bool:
        """Persist *tree* via the storage backend."""
        return await self.storage.save_tree(tree)

    async def delete_tree(self, tree_id: str) -> bool:
        """Delete a stored tree by id."""
        return await self.storage.delete_tree(tree_id)

    async def list_trees(self) -> list[str]:
        """List stored trees, as reported by the storage backend."""
        return await self.storage.list_trees()

    async def add_node(
        self,
        tree_id: str,
        node: Node,
        compress: bool = True,
    ) -> bool:
        """Add *node* to the tree, optionally compressing it first.

        Returns False when the tree does not exist.
        """
        target = await self.get_tree(tree_id)
        if not target:
            return False
        if compress:
            self.compressor.compress_node(node)
        target.add_node(node)
        return await self.save_tree(target)

    async def remove_node(self, tree_id: str, node_id: str) -> Optional[Node]:
        """Remove a node from the tree; returns the removed node, if any."""
        target = await self.get_tree(tree_id)
        if not target:
            return None
        removed = target.remove_node(node_id)
        if removed:
            # Persist only when something actually changed.
            await self.save_tree(target)
        return removed

    async def get_node(self, tree_id: str, node_id: str) -> Optional[Node]:
        """Fetch a single node, or None when tree or node is missing."""
        tree = await self.get_tree(tree_id)
        return tree.get_node(node_id) if tree else None

    async def get_children(self, tree_id: str, node_id: str) -> list[Node]:
        """Return the direct children of a node ([] when the tree is missing)."""
        tree = await self.get_tree(tree_id)
        return tree.get_children(node_id) if tree else []

    async def get_node_path(self, tree_id: str, node_id: str) -> list[Node]:
        """Return the path to a node ([] when the tree is missing)."""
        tree = await self.get_tree(tree_id)
        return tree.get_path(node_id) if tree else []

    async def get_negative_paths(self, tree_id: str) -> list[Node]:
        """Return the tree's negative-path nodes ([] when the tree is missing)."""
        tree = await self.get_tree(tree_id)
        return tree.get_negative_paths() if tree else []

    async def get_statistics(self, tree_id: str) -> dict:
        """Summarize node counts per layer plus token savings for a tree."""
        tree = await self.get_tree(tree_id)
        if not tree:
            return {}

        layer_counts = {
            key: len(tree.get_nodes_by_layer(layer))
            for key, layer in (
                ("why_nodes", NodeLayer.WHY),
                ("how_nodes", NodeLayer.HOW),
                ("what_nodes", NodeLayer.WHAT),
            )
        }
        return {
            "total_nodes": len(tree.nodes),
            **layer_counts,
            "negative_paths": len(tree.get_negative_paths()),
            "tokens_saved": tree.total_tokens_saved(),
        }
|