knight-os 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +194 -0
- package/bin/knight.js +253 -0
- package/package.json +43 -0
- package/scripts/compress-memory.py +147 -0
- package/scripts/heartbeat.py +200 -0
- package/scripts/knight-status.py +219 -0
- package/scripts/reflection-analyzer.py +319 -0
- package/scripts/write-reflection.py +132 -0
- package/src/chat.js +237 -0
- package/src/config.js +128 -0
- package/src/setup.js +420 -0
- package/templates/AGENTS.md +82 -0
- package/templates/HEARTBEAT.md +54 -0
- package/templates/MEMORY.md +65 -0
- package/templates/PROJECTS.md +60 -0
- package/templates/REDLINES.md +99 -0
- package/templates/SOUL.md +39 -0
- package/templates/TOOLS.md +43 -0
- package/templates/USER.md +63 -0
- package/templates/memory/TEMPLATE-daily.md +21 -0
- package/templates/memory/ai-patterns.md +90 -0
- package/templates/memory/user-patterns.md +52 -0
|
@@ -0,0 +1,319 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
reflection-analyzer.py — Analyze reflections for repeated failure patterns.
|
|
4
|
+
|
|
5
|
+
Usage:
|
|
6
|
+
python3 scripts/reflection-analyzer.py # Incremental analysis
|
|
7
|
+
python3 scripts/reflection-analyzer.py --dry-run # Print only, no side effects
|
|
8
|
+
python3 scripts/reflection-analyzer.py --all # Full analysis (ignore cursor)
|
|
9
|
+
python3 scripts/reflection-analyzer.py --min-count 3 # Adjust threshold (default: 2)
|
|
10
|
+
|
|
11
|
+
Backends:
|
|
12
|
+
- local (default): reads from {workspace}/memory/reflections/*.jsonl
|
|
13
|
+
- supabase: queries Supabase REST API (requires config)
|
|
14
|
+
"""
|
|
15
|
+
|
|
16
|
+
import json
|
|
17
|
+
import os
|
|
18
|
+
import sys
|
|
19
|
+
import argparse
|
|
20
|
+
import re
|
|
21
|
+
import urllib.request
|
|
22
|
+
from datetime import datetime, timezone
|
|
23
|
+
from pathlib import Path
|
|
24
|
+
from collections import defaultdict
|
|
25
|
+
from typing import Optional
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
# Known failure-pattern heuristics. Each entry defines:
#   id       — stable identifier used as the grouping key during analysis
#   label    — human-readable name shown in reports/notifications
#   keywords — case-insensitive terms matched (via re.search) against the
#              combined reflection text; one hit per reflection suffices
#   rule     — the candidate rule suggested when the pattern repeats
PATTERN_MAP = [
    {
        "id": "db-schema-assumption",
        "label": "Database field assumption",
        "keywords": ["column", "schema", "field", "table", "mapping"],
        "rule": "Verify schema before any DB operation: `SELECT column_name FROM information_schema.columns WHERE table_name='xxx'`",
    },
    {
        "id": "task-status-not-updated",
        "label": "Task status not updated after completion",
        "keywords": ["PATCH", "status", "not updated", "backlog", "task status"],
        "rule": "After task completion, always: write-reflection + update status — both steps must complete together",
    },
    {
        "id": "missing-reflection",
        "label": "Forgot to write reflection",
        "keywords": ["reflection", "forgot", "missing", "not written"],
        "rule": "No reflection = task not complete. Always call write-reflection.py immediately after finishing a task",
    },
    {
        "id": "heartbeat-gap",
        "label": "Heartbeat gap/interruption",
        "keywords": ["gap", "interrupted", "blank", "no record", "heartbeat"],
        "rule": "After a heartbeat gap, explain the reason on next execution. 2+ consecutive gaps require user notification",
    },
    {
        "id": "silent-tool-switch",
        "label": "Silent tool switch after failure",
        "keywords": ["silent", "tool failed", "unreported", "switch"],
        "rule": "When a tool fails, always report: 'X failed, switching to Y' — silence is a violation",
    },
    {
        "id": "read-before-exec",
        "label": "Modify without reading first",
        "keywords": ["read", "file", "confirm", "structure", "before modify"],
        "rule": "Before modifying any file: read first, understand structure, then execute",
    },
    {
        "id": "rule-exists-not-executed",
        "label": "Rule exists but not followed",
        "keywords": ["rule exists", "known but", "not executed", "upgrade"],
        "rule": "When a rule exists but wasn't followed, escalate its priority to mandatory trigger",
    },
    {
        "id": "blocked-task-not-surfaced",
        "label": "Blocked task not surfaced",
        "keywords": ["blocked", "waiting", "needs confirmation", "cannot close"],
        "rule": "Blocked tasks must be surfaced every heartbeat — never let them accumulate silently",
    },
]
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
def load_config():
    """Return the first parseable knight config (project first, then global).

    Falls back to an empty dict when no config file exists or none can be read.
    """
    candidates = (
        Path.cwd() / "knight.config.json",
        Path.home() / ".knight" / "config.json",
    )
    for candidate in candidates:
        if not candidate.exists():
            continue
        try:
            return json.loads(candidate.read_text())
        except (json.JSONDecodeError, OSError):
            # Unreadable/corrupt file: silently try the next location.
            continue
    return {}
|
|
92
|
+
|
|
93
|
+
|
|
94
|
+
def resolve_workspace(config):
    """Return the workspace directory as an expanded Path.

    Defaults to ``~/.openclaw/workspace`` when the config has no "workspace" key.
    """
    return Path(config.get("workspace", "~/.openclaw/workspace")).expanduser()
|
|
97
|
+
|
|
98
|
+
|
|
99
|
+
def get_cursor_path(config):
    """Return the path of the incremental-analysis cursor file in the workspace."""
    return resolve_workspace(config) / ".knight-reflection-cursor"
|
|
102
|
+
|
|
103
|
+
|
|
104
|
+
def read_cursor(config) -> Optional[str]:
    """Return the stored cursor timestamp, or None when absent or empty."""
    path = get_cursor_path(config)
    if not path.exists():
        return None
    content = path.read_text().strip()
    return content if content else None
|
|
109
|
+
|
|
110
|
+
|
|
111
|
+
def write_cursor(config, ts: str):
    """Persist *ts* as the new incremental-analysis cursor."""
    get_cursor_path(config).write_text(ts)
|
|
114
|
+
|
|
115
|
+
|
|
116
|
+
def fetch_local(config, since: Optional[str]) -> list:
    """Read reflections from local .jsonl files.

    Returns all parseable rows, or only those with created_at strictly after
    *since* when a cursor timestamp is given. Malformed lines are skipped.
    """
    local_cfg = config.get("storage", {}).get("local", {})
    reflections_dir = resolve_workspace(config) / local_cfg.get("reflections_dir", "memory/reflections")

    if not reflections_dir.exists():
        return []

    collected = []
    for jsonl_file in sorted(reflections_dir.glob("*.jsonl")):
        with open(jsonl_file, encoding="utf-8") as fh:
            for raw in fh:
                raw = raw.strip()
                if not raw:
                    continue
                try:
                    record = json.loads(raw)
                except json.JSONDecodeError:
                    # Tolerate corrupt lines; keep reading the rest of the file.
                    continue
                if since and record.get("created_at", "") <= since:
                    continue
                collected.append(record)
    return collected
|
|
140
|
+
|
|
141
|
+
|
|
142
|
+
def fetch_supabase(config, since: Optional[str]) -> list:
    """Fetch reflections from the Supabase REST API.

    Returns the decoded JSON list of rows (ascending by created_at, max 200).
    Exits the process with status 1 when the backend is unconfigured or the
    request fails, matching this script's CLI error convention.
    """
    from urllib.parse import quote

    supabase_cfg = config.get("storage", {}).get("supabase", {})
    url = supabase_cfg.get("url", "")
    key = supabase_cfg.get("service_key", "") or os.environ.get("SUPABASE_SERVICE_KEY", "")

    if not url or not key:
        print("Error: Supabase URL or key not configured.", file=sys.stderr)
        sys.exit(1)

    params = "select=id,created_at,context,what_failed,next_time,confidence&order=created_at.asc&limit=200"
    if since:
        # Percent-encode the cursor timestamp properly. The previous ad-hoc
        # replace() calls only handled '+' and ':' and would mis-encode any
        # other reserved character that ends up in the timestamp.
        params += f"&created_at=gt.{quote(since, safe='')}"

    req = urllib.request.Request(
        f"{url}/rest/v1/reflections?{params}",
        headers={"apikey": key, "Authorization": f"Bearer {key}"},
        method="GET",
    )
    try:
        with urllib.request.urlopen(req, timeout=15) as r:
            return json.load(r)
    except Exception as e:
        print(f"Error: Supabase query failed: {e}", file=sys.stderr)
        sys.exit(1)
|
|
168
|
+
|
|
169
|
+
|
|
170
|
+
def analyze(rows: list, min_count: int) -> list:
    """Group reflections by matched failure pattern.

    Returns candidate entries ({pattern, count, examples}) for every pattern
    matched at least *min_count* times, sorted by descending count.
    """
    matches_by_pattern = defaultdict(list)

    for row in rows:
        blob = " ".join(
            (row.get(field) or "") for field in ("what_failed", "next_time", "context")
        ).lower()

        for pattern in PATTERN_MAP:
            # One hit per reflection per pattern: any() short-circuits on
            # the first matching keyword, like the original break.
            if any(re.search(kw.lower(), blob) for kw in pattern["keywords"]):
                matches_by_pattern[pattern["id"]].append(row)

    candidates = [
        {
            "pattern": pattern,
            "count": len(matches_by_pattern[pattern["id"]]),
            "examples": matches_by_pattern[pattern["id"]][:2],
        }
        for pattern in PATTERN_MAP
        if len(matches_by_pattern[pattern["id"]]) >= min_count
    ]
    candidates.sort(key=lambda entry: -entry["count"])
    return candidates
|
|
198
|
+
|
|
199
|
+
|
|
200
|
+
def format_console(candidates: list, rows: list, since: Optional[str]) -> str:
    """Render the analysis result as a human-readable console report."""
    scope = f"since {since[:10]}" if since else "all data"
    out = [f"\n[knight] Reflection Analyzer | {scope} | {len(rows)} entries"]

    if candidates:
        out.append(f" Found {len(candidates)} candidate rules:\n")
        for idx, candidate in enumerate(candidates, 1):
            pattern = candidate["pattern"]
            out.append(f" [{idx}] {pattern['label']} x {candidate['count']} times")
            out.append(f" Rule: {pattern['rule']}")
            for example in candidate["examples"]:
                snippet = (example.get("what_failed") or "")[:60]
                out.append(f" Example: \"{snippet}...\"")
            out.append("")
    else:
        out.append(" No repeated failure patterns found — system stable")

    return "\n".join(out)
|
|
218
|
+
|
|
219
|
+
|
|
220
|
+
def format_notification(candidates: list, rows: list, since: Optional[str], config: dict) -> Optional[str]:
    """Build the Telegram notification text, or None when there is nothing to report.

    *config* is accepted for signature compatibility with the other formatters;
    the message body does not currently use it. (The previous version read
    ``user_name`` from it into an unused local.)
    """
    if not candidates:
        return None

    scope = f"since {since[:10]}" if since else "all data"
    lines = [
        f"[knight] Reflection Analysis | {scope} | {len(rows)} entries",
        f"Found {len(candidates)} repeated failure patterns — suggest writing to ai-patterns.md\n",
    ]

    for i, c in enumerate(candidates, 1):
        p = c["pattern"]
        lines.append(f"{i}. {p['label']} x {c['count']} times")
        lines.append(f" Rule: {p['rule']}")
        # Show the most recent example snippet for each pattern.
        ex = c["examples"][0]
        snippet = (ex.get("what_failed") or "")[:50]
        lines.append(f" Recent: \"{snippet}\"\n")

    # Plain string literal: no placeholders, so the f-prefix was dropped.
    lines.append("Reply 'write' to add to ai-patterns.md / 'skip' to ignore")
    return "\n".join(lines)
|
|
241
|
+
|
|
242
|
+
|
|
243
|
+
def send_telegram(text: str, config: dict):
    """POST *text* to the configured Telegram chat.

    Credentials come from config (notifications.telegram) with environment
    variables as fallback; when unconfigured this is a no-op with a warning.
    """
    telegram_cfg = config.get("notifications", {}).get("telegram", {})
    bot_token = telegram_cfg.get("bot_token", "") or os.environ.get("TELEGRAM_BOT_TOKEN", "")
    chat_id = telegram_cfg.get("chat_id", "") or os.environ.get("TELEGRAM_CHAT_ID", "")

    if not (bot_token and chat_id):
        print(" Warning: Telegram not configured, skipping notification", file=sys.stderr)
        return

    request = urllib.request.Request(
        f"https://api.telegram.org/bot{bot_token}/sendMessage",
        data=json.dumps({"chat_id": chat_id, "text": text}).encode(),
        headers={"Content-Type": "application/json"},
        method="POST",
    )
    try:
        with urllib.request.urlopen(request, timeout=10) as response:
            body = json.load(response)
            if body.get("ok"):
                print(" Telegram notification sent")
            else:
                print(f" Telegram failed: {body}", file=sys.stderr)
    except Exception as exc:
        print(f" Telegram error: {exc}", file=sys.stderr)
|
|
268
|
+
|
|
269
|
+
|
|
270
|
+
def main():
    """CLI entry point: load config, fetch reflections, analyze, report, notify."""
    parser = argparse.ArgumentParser(description="Analyze reflections for repeated failure patterns.")
    parser.add_argument("--dry-run", action="store_true", help="Print only, no side effects")
    parser.add_argument("--all", action="store_true", help="Analyze all data (ignore cursor)")
    parser.add_argument("--min-count", type=int, default=None, help="Trigger threshold (default: 2)")
    args = parser.parse_args()

    config = load_config()
    # Precedence: explicit CLI flag > config value > built-in default of 2.
    # (Previously the code tested `args.min_count != 2`, so an explicit
    # `--min-count 2` could not override a different config value.)
    if args.min_count is not None:
        min_count = args.min_count
    else:
        min_count = config.get("reflection", {}).get("min_pattern_count", 2)

    since = None if args.all else read_cursor(config)
    scope_desc = "all data" if not since else f"since {since[:10]}"
    print(f"[knight] Analysis scope: {scope_desc}")

    storage_cfg = config.get("storage", {})
    backend = storage_cfg.get("backend", "local")

    if backend == "supabase" or storage_cfg.get("supabase", {}).get("enabled"):
        rows = fetch_supabase(config, since)
    else:
        rows = fetch_local(config, since)

    print(f" Read {len(rows)} reflections")

    if not rows:
        print(" No new data, skipping analysis")
        return

    candidates = analyze(rows, min_count)
    print(format_console(candidates, rows, since))

    # Rows arrive ascending by created_at, so the last row carries the newest
    # timestamp; fall back to "now" if the field is missing.
    latest_ts = rows[-1].get("created_at", datetime.now(timezone.utc).isoformat())
    if not args.dry_run:
        write_cursor(config, latest_ts)
        print(f" Cursor updated to {latest_ts[:16]}")

    if candidates and not args.dry_run:
        notifications_cfg = config.get("notifications", {})
        if notifications_cfg.get("telegram", {}).get("enabled"):
            msg = format_notification(candidates, rows, since, config)
            if msg:
                send_telegram(msg, config)
    elif args.dry_run:
        print("\n [dry-run: no cursor update, no notifications]")
|
|
316
|
+
|
|
317
|
+
|
|
318
|
+
# Allow running this file directly as a standalone CLI script.
if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1,132 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
write-reflection.py — Write a reflection after task completion.
|
|
4
|
+
|
|
5
|
+
Usage:
|
|
6
|
+
python3 scripts/write-reflection.py \
|
|
7
|
+
--context "Task title" \
|
|
8
|
+
--what_worked "What went well" \
|
|
9
|
+
--what_failed "What did not work" \
|
|
10
|
+
--next_time "How to improve" \
|
|
11
|
+
--tags "execution,memory" \
|
|
12
|
+
--session_type "heartbeat" \
|
|
13
|
+
--confidence 3
|
|
14
|
+
|
|
15
|
+
Storage backends:
|
|
16
|
+
- local (default): appends JSON to {workspace}/memory/reflections/YYYY-MM-DD.jsonl
|
|
17
|
+
- supabase: POST to Supabase REST API (requires storage.supabase.enabled=true in config)
|
|
18
|
+
"""
|
|
19
|
+
|
|
20
|
+
import json
|
|
21
|
+
import os
|
|
22
|
+
import sys
|
|
23
|
+
import argparse
|
|
24
|
+
from datetime import datetime, timezone
|
|
25
|
+
from pathlib import Path
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
def load_config():
    """Load knight config from project or global path; return {} if none readable."""
    for candidate in (
        Path.cwd() / "knight.config.json",
        Path.home() / ".knight" / "config.json",
    ):
        if not candidate.exists():
            continue
        try:
            return json.loads(candidate.read_text())
        except (json.JSONDecodeError, OSError) as exc:
            # Warn but keep going: the next location may still be readable.
            print(f"Warning: failed to read {candidate}: {exc}", file=sys.stderr)
    return {}
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
def resolve_workspace(config):
    """Expand and return the configured workspace path (default ~/.openclaw/workspace)."""
    workspace = config.get("workspace", "~/.openclaw/workspace")
    return Path(workspace).expanduser()
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
def write_local(payload, config):
    """Append *payload* as one JSON line to today's reflections file.

    Creates the reflections directory on first use.
    """
    local_cfg = config.get("storage", {}).get("local", {})
    reflections_dir = resolve_workspace(config) / local_cfg.get("reflections_dir", "memory/reflections")
    reflections_dir.mkdir(parents=True, exist_ok=True)

    # One file per UTC day keeps files small and naturally ordered.
    today = datetime.now(timezone.utc).strftime("%Y-%m-%d")
    filepath = reflections_dir / f"{today}.jsonl"

    with open(filepath, "a", encoding="utf-8") as fh:
        fh.write(json.dumps(payload, ensure_ascii=False) + "\n")

    print(f"[knight] reflection written to {filepath}")
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
def write_supabase(payload, config):
    """POST *payload* to the Supabase reflections table.

    Exits the process with status 1 when the backend is unconfigured or the
    request fails, matching this script's CLI error convention.
    """
    import urllib.request

    supabase_cfg = config.get("storage", {}).get("supabase", {})
    url = supabase_cfg.get("url", "")
    key = supabase_cfg.get("service_key", "") or os.environ.get("SUPABASE_SERVICE_KEY", "")

    if not (url and key):
        print("Error: Supabase URL or service key not configured.", file=sys.stderr)
        sys.exit(1)

    headers = {
        "apikey": key,
        "Authorization": f"Bearer {key}",
        "Content-Type": "application/json",
        # Ask PostgREST to echo the created row back so we can log its id.
        "Prefer": "return=representation",
    }
    request = urllib.request.Request(
        f"{url}/rest/v1/reflections",
        data=json.dumps(payload).encode(),
        headers=headers,
        method="POST",
    )
    try:
        with urllib.request.urlopen(request, timeout=10) as response:
            created = json.load(response)
            print(f"[knight] reflection written to Supabase: {created[0]['id'][:8]}...")
    except Exception as exc:
        print(f"Error: Supabase write failed: {exc}", file=sys.stderr)
        sys.exit(1)
|
|
94
|
+
|
|
95
|
+
|
|
96
|
+
def main():
    """Parse CLI arguments, assemble the reflection payload, and dispatch to a backend."""
    parser = argparse.ArgumentParser(description="Write a reflection after task completion.")
    parser.add_argument("--context", required=True, help="Task context or title")
    parser.add_argument("--what_worked", default="", help="What went well")
    parser.add_argument("--what_failed", default="", help="What did not work")
    parser.add_argument("--next_time", default="", help="Improvement for next time")
    parser.add_argument("--tags", default="execution", help="Comma-separated tags")
    parser.add_argument("--session_type", default="heartbeat", help="Session type")
    parser.add_argument("--confidence", type=int, default=3, help="Confidence 1-5")
    parser.add_argument("--task_type", default="", help="Task type")
    args = parser.parse_args()

    config = load_config()

    # Normalize tags: split on commas, trim whitespace, drop empties.
    tags = [tag.strip() for tag in args.tags.split(",") if tag.strip()]
    payload = {
        "context": args.context,
        "what_worked": args.what_worked,
        "what_failed": args.what_failed,
        "next_time": args.next_time,
        "tags": tags,
        "session_type": args.session_type,
        "confidence": args.confidence,
        "task_type": args.task_type,
        "created_at": datetime.now(timezone.utc).isoformat(),
    }

    storage_cfg = config.get("storage", {})
    use_supabase = (
        storage_cfg.get("backend", "local") == "supabase"
        or storage_cfg.get("supabase", {}).get("enabled")
    )
    if use_supabase:
        write_supabase(payload, config)
    else:
        write_local(payload, config)
|
|
129
|
+
|
|
130
|
+
|
|
131
|
+
# Allow running this file directly as a standalone CLI script.
if __name__ == "__main__":
    main()
|
package/src/chat.js
ADDED
|
@@ -0,0 +1,237 @@
|
|
|
1
|
+
'use strict';
|
|
2
|
+
|
|
3
|
+
const fs = require('fs');
|
|
4
|
+
const path = require('path');
|
|
5
|
+
const https = require('https');
|
|
6
|
+
const readline = require('readline');
|
|
7
|
+
|
|
8
|
+
function loadEnv(workspace) {
  // Minimal .env parser: KEY=VALUE lines, '#' comment lines, no quoting or
  // escaping rules. Returns {} when the workspace has no .env file.
  const vars = {};
  const envPath = path.join(workspace, '.env');
  if (!fs.existsSync(envPath)) return vars;

  for (const rawLine of fs.readFileSync(envPath, 'utf-8').split('\n')) {
    const line = rawLine.trim();
    if (!line || line.startsWith('#')) continue;
    const eq = line.indexOf('=');
    if (eq === -1) continue;
    vars[line.slice(0, eq).trim()] = line.slice(eq + 1).trim();
  }
  return vars;
}
|
|
24
|
+
|
|
25
|
+
function loadSystemPrompt(workspace, config) {
  // Concatenate the configured prompt files (or the defaults below) that
  // exist and are non-empty; each section is tagged with its source filename
  // via an HTML comment so the combined prompt stays traceable.
  const defaults = ['SOUL.md', 'AGENTS.md', 'MEMORY.md', 'REDLINES.md'];
  const files = (config.model && config.model.system_prompt_files) || defaults;

  const loadedFiles = [];
  const sections = [];

  for (const file of files) {
    const filePath = path.join(workspace, file);
    if (!fs.existsSync(filePath)) continue;
    const content = fs.readFileSync(filePath, 'utf-8').trim();
    if (!content) continue;
    sections.push(`<!-- ${file} -->\n${content}`);
    loadedFiles.push(file);
  }

  return { systemPrompt: sections.join('\n\n'), loadedFiles };
}
|
|
46
|
+
|
|
47
|
+
/**
 * Stream one completion from the Anthropic Messages API.
 *
 * Sends a POST to /v1/messages with `stream: true`, invokes `onChunk(text)`
 * for each text delta as it arrives, and resolves with the full concatenated
 * reply once the response stream ends. Rejects on 401 (bad key), any other
 * non-200 status (with the API's error type/message when parseable), or a
 * network error.
 */
function callAnthropic(apiKey, model, messages, systemPrompt, maxTokens, onChunk) {
  return new Promise((resolve, reject) => {
    const body = JSON.stringify({
      model,
      max_tokens: maxTokens,
      system: systemPrompt,
      messages,
      stream: true,
    });

    const options = {
      hostname: 'api.anthropic.com',
      path: '/v1/messages',
      method: 'POST',
      headers: {
        'content-type': 'application/json',
        'x-api-key': apiKey,
        'anthropic-version': '2023-06-01',
      },
    };

    const req = https.request(options, (res) => {
      // Auth failure gets a dedicated, actionable message.
      if (res.statusCode === 401) {
        reject(new Error('Invalid API key. Check your ANTHROPIC_API_KEY.'));
        return;
      }

      // Any other non-200: buffer the whole body, then try to surface the
      // structured API error; fall back to the raw body if it isn't JSON.
      if (res.statusCode !== 200) {
        let data = '';
        res.on('data', (chunk) => (data += chunk));
        res.on('end', () => {
          try {
            const err = JSON.parse(data);
            reject(
              new Error(
                `API error (${err.error?.type || res.statusCode}): ${err.error?.message || data}`
              )
            );
          } catch {
            reject(new Error(`API error (${res.statusCode}): ${data}`));
          }
        });
        return;
      }

      // Success path: parse the SSE stream line by line. `buffer` holds any
      // partial line that straddles a TCP chunk boundary.
      let buffer = '';
      let fullText = '';

      res.on('data', (chunk) => {
        buffer += chunk.toString();
        const lines = buffer.split('\n');
        // The last element may be an incomplete line; keep it for next chunk.
        buffer = lines.pop();

        for (const line of lines) {
          if (!line.startsWith('data: ')) continue;
          const payload = line.slice(6).trim();
          if (payload === '[DONE]') continue;
          try {
            const event = JSON.parse(payload);
            // Only text deltas contribute to the reply; other event types
            // (message_start, ping, etc.) are ignored.
            if (
              event.type === 'content_block_delta' &&
              event.delta?.type === 'text_delta'
            ) {
              const text = event.delta.text;
              fullText += text;
              onChunk(text);
            }
          } catch {
            // skip malformed events
          }
        }
      });

      res.on('end', () => resolve(fullText));
    });

    req.on('error', (err) => reject(new Error(`Network error: ${err.message}`)));
    req.write(body);
    req.end();
  });
}
|
|
128
|
+
|
|
129
|
+
/**
 * Interactive REPL chat loop against the Anthropic API.
 *
 * Resolves the model/max_tokens from config (with defaults), the API key from
 * the workspace .env or the process environment (exiting with code 1 when
 * absent), and the system prompt from the workspace prompt files. Supports
 * the slash commands /exit, /clear, /memory, and /help; every other input is
 * sent as a user turn and the streamed reply is echoed to stdout. Never
 * returns normally — it exits the process via /exit or readline close
 * (Ctrl+C / EOF).
 */
async function chat(config, workspace) {
  const model = (config.model && config.model.name) || 'claude-sonnet-4-5';
  const maxTokens = (config.model && config.model.max_tokens) || 8096;

  // Workspace .env takes precedence over the process environment.
  const env = loadEnv(workspace);
  const apiKey = env.ANTHROPIC_API_KEY || process.env.ANTHROPIC_API_KEY;

  if (!apiKey) {
    console.log('\nNo Anthropic API key found.');
    console.log(
      'Run `knight init` to configure, or set ANTHROPIC_API_KEY in your workspace .env file.\n'
    );
    process.exit(1);
  }

  const { systemPrompt, loadedFiles } = loadSystemPrompt(workspace, config);

  const separator = '──────────────────────────────────────────────────────';

  console.log(`\nKnight Chat — ${model}`);
  if (loadedFiles.length > 0) {
    console.log(
      `System prompt loaded from: ${loadedFiles.join(', ')} (${loadedFiles.length} files)`
    );
  } else {
    console.log('System prompt: (no files found)');
  }
  console.log('Type /exit or Ctrl+C to quit, /clear to reset history, /memory to view system prompt');
  console.log(separator);

  // Full conversation history sent on every API call; `turns` counts
  // completed user/assistant exchanges.
  const messages = [];
  let turns = 0;

  const rl = readline.createInterface({
    input: process.stdin,
    output: process.stdout,
  });

  // Promise wrapper around readline's callback-style question().
  const askUser = () => {
    return new Promise((resolve) => {
      rl.question('\nYou: ', (answer) => resolve(answer));
    });
  };

  // Shared exit path for Ctrl+C / stdin EOF (readline 'close').
  const shutdown = () => {
    console.log(`\nGoodbye. Conversation had ${turns} turns.`);
    rl.close();
    process.exit(0);
  };

  rl.on('close', shutdown);

  while (true) {
    const input = await askUser();
    if (input === null || input === undefined) break;

    const trimmed = input.trim();
    if (!trimmed) continue;

    if (trimmed === '/exit') {
      console.log(`\nGoodbye. Conversation had ${turns} turns.`);
      rl.close();
      process.exit(0);
    }

    if (trimmed === '/clear') {
      messages.length = 0;
      turns = 0;
      console.log('History cleared.');
      continue;
    }

    if (trimmed === '/memory') {
      console.log(`\n${separator}`);
      console.log(systemPrompt || '(empty system prompt)');
      console.log(separator);
      continue;
    }

    if (trimmed === '/help') {
      console.log('\nCommands:');
      console.log(' /exit — Exit chat');
      console.log(' /clear — Clear conversation history');
      console.log(' /memory — Show system prompt');
      console.log(' /help — Show this help');
      continue;
    }

    messages.push({ role: 'user', content: trimmed });

    process.stdout.write('\nAssistant: ');

    try {
      // Stream chunks straight to stdout as they arrive; the resolved value
      // is the full reply, appended to history for the next turn.
      const reply = await callAnthropic(apiKey, model, messages, systemPrompt, maxTokens, (chunk) => {
        process.stdout.write(chunk);
      });
      process.stdout.write('\n');
      messages.push({ role: 'assistant', content: reply });
      turns++;
    } catch (err) {
      // NOTE(review): on failure the user turn stays in `messages`, so it is
      // re-sent on the next exchange — appears intentional (retry-friendly).
      process.stdout.write('\n');
      console.log(`Error: ${err.message}`);
    }

    console.log(separator);
  }
}
|
|
236
|
+
|
|
237
|
+
// Public API: `chat` is the entry point; the helpers are exported so callers
// (and tests) can reuse the env/prompt loading and the API client directly.
module.exports = { chat, loadEnv, loadSystemPrompt, callAnthropic };
|