seraph-agent 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- brain/acp/__init__.py +1 -0
- brain/acp/server.py +380 -0
- brain/agent/__init__.py +0 -0
- brain/agent/ambient_learning.py +218 -0
- brain/agent/bootstrap.py +107 -0
- brain/agent/btw.py +40 -0
- brain/agent/cache.py +89 -0
- brain/agent/command_queue.py +119 -0
- brain/agent/compaction.py +256 -0
- brain/agent/confidence.py +183 -0
- brain/agent/context_anchors.py +139 -0
- brain/agent/context_manager.py +84 -0
- brain/agent/context_references.py +76 -0
- brain/agent/conversation_branching.py +189 -0
- brain/agent/core.py +221 -0
- brain/agent/cost_router.py +234 -0
- brain/agent/debounce.py +118 -0
- brain/agent/delivery.py +176 -0
- brain/agent/device_pairing.py +151 -0
- brain/agent/dock.py +55 -0
- brain/agent/doctor.py +174 -0
- brain/agent/dreaming.py +193 -0
- brain/agent/event_stream.py +87 -0
- brain/agent/failover.py +347 -0
- brain/agent/focus.py +63 -0
- brain/agent/heartbeat.py +201 -0
- brain/agent/i18n.py +112 -0
- brain/agent/insights.py +93 -0
- brain/agent/link_understanding.py +183 -0
- brain/agent/llm.py +792 -0
- brain/agent/loop_detection.py +189 -0
- brain/agent/markdown_chunks.py +108 -0
- brain/agent/model_router.py +259 -0
- brain/agent/multi_model_consensus.py +186 -0
- brain/agent/pairing.py +128 -0
- brain/agent/personas.py +66 -0
- brain/agent/preemptive_checks.py +232 -0
- brain/agent/process_registry.py +153 -0
- brain/agent/process_tree.py +112 -0
- brain/agent/prompt_builder.py +87 -0
- brain/agent/prompt_compiler.py +141 -0
- brain/agent/prompt_pressure.py +244 -0
- brain/agent/redact.py +76 -0
- brain/agent/restart_recovery.py +132 -0
- brain/agent/self_healing.py +288 -0
- brain/agent/self_improve.py +285 -0
- brain/agent/setup_wizard.py +198 -0
- brain/agent/skill_extraction.py +243 -0
- brain/agent/ssrf_guard.py +124 -0
- brain/agent/tailscale.py +60 -0
- brain/agent/thinking.py +102 -0
- brain/agent/thread_ownership.py +98 -0
- brain/agent/title_generator.py +57 -0
- brain/agent/token_budget_viz.py +210 -0
- brain/agent/tool_prediction.py +174 -0
- brain/agent/trajectory.py +136 -0
- brain/agent/url_safety.py +163 -0
- brain/agent/video_generation.py +62 -0
- brain/auth.py +103 -0
- brain/channels/__init__.py +0 -0
- brain/channels/base.py +329 -0
- brain/channels/canvas.py +244 -0
- brain/channels/dingtalk_bot.py +70 -0
- brain/channels/discord_bot.py +349 -0
- brain/channels/discord_features.py +263 -0
- brain/channels/email_bot.py +81 -0
- brain/channels/feishu_bot.py +105 -0
- brain/channels/google_chat_bot.py +70 -0
- brain/channels/imessage_bot.py +64 -0
- brain/channels/irc_bot.py +83 -0
- brain/channels/line_bot.py +74 -0
- brain/channels/matrix_bot.py +73 -0
- brain/channels/mattermost_bot.py +87 -0
- brain/channels/msteams_bot.py +75 -0
- brain/channels/nostr_bot.py +69 -0
- brain/channels/pwa.py +270 -0
- brain/channels/signal_bot.py +65 -0
- brain/channels/slack_bot.py +190 -0
- brain/channels/slack_features.py +196 -0
- brain/channels/sms_bot.py +75 -0
- brain/channels/telegram.py +1682 -0
- brain/channels/telegram_features.py +323 -0
- brain/channels/twitch_bot.py +75 -0
- brain/channels/voice_wake.py +263 -0
- brain/channels/webchat.py +136 -0
- brain/channels/whatsapp_bot.py +160 -0
- brain/channels/whatsapp_features.py +281 -0
- brain/cli.py +181 -0
- brain/config.py +204 -0
- brain/config_schema.py +221 -0
- brain/control_ui/__init__.py +1 -0
- brain/control_ui/api.py +603 -0
- brain/cron/__init__.py +0 -0
- brain/cron/scheduler.py +169 -0
- brain/diagnostics/__init__.py +1 -0
- brain/diagnostics/telemetry.py +199 -0
- brain/eval/__init__.py +0 -0
- brain/eval/runner.py +201 -0
- brain/mcp_server.py +164 -0
- brain/memory/__init__.py +0 -0
- brain/memory/store.py +293 -0
- brain/models/router.py +273 -0
- brain/plugins/__init__.py +0 -0
- brain/plugins/example_plugin.py +37 -0
- brain/plugins/loader.py +322 -0
- brain/rules/__init__.py +0 -0
- brain/rules/enforcer.py +180 -0
- brain/secrets/__init__.py +1 -0
- brain/secrets/manager.py +277 -0
- brain/security/__init__.py +1 -0
- brain/security/sandbox.py +200 -0
- brain/server.py +272 -0
- brain/sessions/__init__.py +0 -0
- brain/sessions/store.py +330 -0
- brain/sessions/sync.py +94 -0
- brain/skills/__init__.py +0 -0
- brain/skills/generator.py +185 -0
- brain/skills/guard.py +232 -0
- brain/skills/marketplace.py +222 -0
- brain/tools/__init__.py +0 -0
- brain/tools/approval.py +84 -0
- brain/tools/audit.py +294 -0
- brain/tools/browser.py +112 -0
- brain/tools/checkpoint.py +86 -0
- brain/tools/clarify.py +43 -0
- brain/tools/clipboard.py +84 -0
- brain/tools/context.py +65 -0
- brain/tools/database.py +77 -0
- brain/tools/delegate.py +133 -0
- brain/tools/email_tool.py +80 -0
- brain/tools/export.py +83 -0
- brain/tools/export_html.py +93 -0
- brain/tools/files.py +155 -0
- brain/tools/filter.py +83 -0
- brain/tools/fuzzy.py +109 -0
- brain/tools/git.py +107 -0
- brain/tools/grounding.py +137 -0
- brain/tools/health.py +63 -0
- brain/tools/homeassistant.py +95 -0
- brain/tools/image.py +89 -0
- brain/tools/interrupt.py +59 -0
- brain/tools/mcp.py +135 -0
- brain/tools/mixture.py +169 -0
- brain/tools/notify.py +52 -0
- brain/tools/openapi_import.py +169 -0
- brain/tools/patch.py +120 -0
- brain/tools/progress.py +77 -0
- brain/tools/qr_code.py +73 -0
- brain/tools/ratelimit.py +59 -0
- brain/tools/registry.py +75 -0
- brain/tools/sandbox.py +112 -0
- brain/tools/session_search.py +55 -0
- brain/tools/skills_hub.py +134 -0
- brain/tools/ssh.py +82 -0
- brain/tools/terminal.py +112 -0
- brain/tools/todo.py +84 -0
- brain/tools/transcribe.py +74 -0
- brain/tools/usage.py +106 -0
- brain/tools/vision.py +99 -0
- brain/tools/voice.py +88 -0
- brain/tools/voice_mode.py +96 -0
- brain/tools/web.py +128 -0
- brain/tools/webhook.py +138 -0
- brain/tui/__init__.py +1 -0
- brain/tui/app.py +203 -0
- seraph_agent-0.2.0.dist-info/METADATA +277 -0
- seraph_agent-0.2.0.dist-info/RECORD +170 -0
- seraph_agent-0.2.0.dist-info/WHEEL +5 -0
- seraph_agent-0.2.0.dist-info/entry_points.txt +2 -0
- seraph_agent-0.2.0.dist-info/top_level.txt +1 -0
brain/acp/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
from .server import ACPServer
|
brain/acp/server.py
ADDED
|
@@ -0,0 +1,380 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Seraph ACP Server — Agent Client Protocol for IDE integration.
|
|
3
|
+
|
|
4
|
+
ACP lets IDEs (VS Code, JetBrains, Cursor) drive Seraph over stdio.
|
|
5
|
+
The IDE sends prompts, Seraph executes tools and streams responses back.
|
|
6
|
+
|
|
7
|
+
Protocol: NDJSON over stdio (same as OpenClaw's ACP bridge).
|
|
8
|
+
|
|
9
|
+
Supported ACP methods:
|
|
10
|
+
- initialize — handshake + capabilities
|
|
11
|
+
- newSession — create a fresh session
|
|
12
|
+
- loadSession — reconnect to existing session
|
|
13
|
+
- listSessions — list available sessions
|
|
14
|
+
- prompt — send a user message
|
|
15
|
+
- cancel — abort current generation
|
|
16
|
+
- session/set_mode — change thinking/tool verbosity
|
|
17
|
+
|
|
18
|
+
Usage:
|
|
19
|
+
python -m brain.acp.server # stdio mode
|
|
20
|
+
seraph acp # via CLI
|
|
21
|
+
|
|
22
|
+
Configure in VS Code settings.json:
|
|
23
|
+
"acp.command": "seraph acp"
|
|
24
|
+
"""
|
|
25
|
+
|
|
26
|
+
import sys
|
|
27
|
+
import json
|
|
28
|
+
import time
|
|
29
|
+
import uuid
|
|
30
|
+
import logging
|
|
31
|
+
import asyncio
|
|
32
|
+
from typing import Dict, List, Optional, Any
|
|
33
|
+
from pathlib import Path
|
|
34
|
+
|
|
35
|
+
logger = logging.getLogger("seraph.acp")
|
|
36
|
+
|
|
37
|
+
SERAPH_DIR = Path.home() / ".seraph"
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
class ACPSession:
    """State for one ACP session, mapped onto a Seraph conversation key."""

    def __init__(self, session_id: str, seraph_session_key: str = ""):
        self.session_id = session_id
        # Default the backing conversation key to one derived from the ACP id.
        self.seraph_key = seraph_session_key if seraph_session_key else f"acp_{session_id}"
        self.created_at = time.time()
        self.message_count = 0
        self.cancelled = False
        self.active_prompt_id: Optional[str] = None
        # Per-session output settings, adjustable via session/set_mode.
        self.mode = dict(
            think_level="normal",     # "off" | "normal" | "high"
            tool_verbosity="normal",  # "quiet" | "normal" | "verbose"
            reasoning=True,
            usage_detail=False,
        )
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
class ACPServer:
    """
    ACP server that bridges IDE prompts to Seraph's agent brain.

    Communicates over stdin/stdout using NDJSON.
    Each line is a JSON object with "method" and optional "id"/"params".
    Responses and event notifications are written back as single JSON lines.
    """

    def __init__(self):
        # All known sessions keyed by ACP session id; at most one is "active".
        self.sessions: Dict[str, ACPSession] = {}
        self.active_session: Optional[ACPSession] = None
        self._initialized = False  # set by the "initialize" handshake
        # NOTE(review): only ever .set() in _handle_cancel; nothing in this
        # class awaits or clears it — confirm it is consumed elsewhere.
        self._abort_event = asyncio.Event()

    def handle_message(self, message: dict) -> Optional[dict]:
        """Handle an incoming ACP message. Returns response or None for notifications."""
        method = message.get("method", "")
        params = message.get("params", {})
        msg_id = message.get("id")

        # Dispatch table: ACP method name -> bound handler taking the params dict.
        handler = {
            "initialize": self._handle_initialize,
            "newSession": self._handle_new_session,
            "loadSession": self._handle_load_session,
            "listSessions": self._handle_list_sessions,
            "prompt": self._handle_prompt,
            "cancel": self._handle_cancel,
            "session/set_mode": self._handle_set_mode,
            "ping": self._handle_ping,
            "shutdown": self._handle_shutdown,
            "notifications/initialized": lambda p: None,  # Notification, no response
        }.get(method)

        if handler is None:
            # -32601 is the JSON-RPC "method not found" code.
            return self._error(msg_id, -32601, f"Unknown method: {method}")

        try:
            result = handler(params)
            if result is None and method.startswith("notifications/"):
                return None  # Notifications don't get responses
            # NOTE(review): requests that arrive without an "id" still get a
            # response with id=None here — confirm clients tolerate that.
            return self._respond(msg_id, result or {})
        except Exception as e:
            logger.error(f"ACP handler error for {method}: {e}")
            return self._error(msg_id, -32000, str(e))

    # ─── ACP Method Handlers ──────────────────────────────────────

    def _handle_initialize(self, params: dict) -> dict:
        """Handshake — exchange capabilities."""
        self._initialized = True
        return {
            "protocolVersion": "2024-11-05",
            "capabilities": {
                "tools": True,
                "streaming": True,
                "sessions": True,
                "modes": ["think_level", "tool_verbosity", "reasoning"],
            },
            "serverInfo": {
                "name": "seraph",
                "version": "0.2.0",
                "description": "The AI agent that audits itself",
            },
        }

    def _handle_new_session(self, params: dict) -> dict:
        """Create a fresh ACP session and make it the active one."""
        # Caller may supply a sessionId; otherwise mint a short random one.
        session_id = params.get("sessionId", str(uuid.uuid4())[:8])
        session = ACPSession(session_id)
        self.sessions[session_id] = session
        self.active_session = session

        logger.info(f"ACP new session: {session_id}")
        return {
            "sessionId": session_id,
            "created": True,
        }

    def _handle_load_session(self, params: dict) -> dict:
        """Reconnect to an existing session (in-memory first, then the store)."""
        session_id = params.get("sessionId", "")

        # Fast path: session already lives in this process.
        if session_id in self.sessions:
            self.active_session = self.sessions[session_id]
            return {"sessionId": session_id, "loaded": True, "messageCount": self.active_session.message_count}

        # Try loading from session store
        try:
            # NOTE(review): import path assumes the package root is on
            # sys.path (see the sys.path.insert in _process_prompt) — verify.
            from sessions.store import get_session_store
            store = get_session_store()
            session_data = store.get_session(f"acp_{session_id}")
            if session_data:
                session = ACPSession(session_id, f"acp_{session_id}")
                session.message_count = session_data.get("message_count", 0)
                self.sessions[session_id] = session
                self.active_session = session

                # Replay message history
                messages = store.get_messages(f"acp_{session_id}")
                # NOTE(review): `history` is built but never used — each entry
                # is only emitted as a session_history notification below.
                history = []
                for msg in messages:
                    history.append({
                        "role": msg["role"],
                        "content": msg["content"],
                    })
                    self._emit_event("session_history", {
                        "role": msg["role"],
                        "content": msg["content"],
                    })

                return {"sessionId": session_id, "loaded": True, "messageCount": len(messages)}
        except Exception as e:
            logger.warning(f"Failed to load session {session_id}: {e}")

        # Create new if not found
        return self._handle_new_session({"sessionId": session_id})

    def _handle_list_sessions(self, params: dict) -> dict:
        """List available sessions (in-memory plus persisted acp_* sessions)."""
        sessions = []
        for sid, s in self.sessions.items():
            sessions.append({
                "sessionId": sid,
                "messageCount": s.message_count,
                "createdAt": s.created_at,
                "active": s == self.active_session,
            })

        # Also check session store
        try:
            from sessions.store import get_session_store
            store = get_session_store()
            stored = store.list_sessions(status="active", limit=20)
            for s in stored:
                # Store keys are prefixed "acp_"; strip to recover the ACP id.
                if s["id"].startswith("acp_"):
                    acp_id = s["id"][4:]
                    if acp_id not in self.sessions:
                        sessions.append({
                            "sessionId": acp_id,
                            "messageCount": s.get("message_count", 0),
                            "createdAt": s.get("created_at", 0),
                            "active": False,
                        })
        except Exception:
            # Store unavailable — listing in-memory sessions is still useful.
            pass

        return {"sessions": sessions}

    def _handle_prompt(self, params: dict) -> dict:
        """Process a user prompt through the agent."""
        if not self.active_session:
            # Side effect: sets self.active_session.
            self._handle_new_session({})

        session = self.active_session
        text = ""

        # Extract text from prompt content
        # Accepts a plain string, or a list of strings / {"type": "text"} blocks.
        content = params.get("content", [])
        if isinstance(content, str):
            text = content
        elif isinstance(content, list):
            for block in content:
                if isinstance(block, dict) and block.get("type") == "text":
                    text += block.get("text", "")
                elif isinstance(block, str):
                    text += block

        if not text:
            return {"error": "Empty prompt"}

        session.message_count += 1
        session.cancelled = False
        prompt_id = str(uuid.uuid4())[:8]
        session.active_prompt_id = prompt_id

        # Emit thinking status
        self._emit_event("status", {"state": "thinking", "promptId": prompt_id})

        # Process through brain
        try:
            response_text = self._process_prompt(session, text)

            # Cancellation is cooperative: checked only after the call returns.
            if session.cancelled:
                self._emit_event("status", {"state": "cancelled", "promptId": prompt_id})
                return {"cancelled": True}

            # Emit response
            self._emit_event("assistant_message", {
                "content": response_text,
                "promptId": prompt_id,
            })

            session.active_prompt_id = None
            self._emit_event("status", {"state": "idle"})

            return {
                "promptId": prompt_id,
                "content": response_text,
                "stopReason": "end_turn",
            }

        except Exception as e:
            logger.error(f"ACP prompt error: {e}")
            self._emit_event("status", {"state": "error", "error": str(e)})
            return {"error": str(e)}

    def _handle_cancel(self, params: dict) -> dict:
        """Cancel the current generation."""
        if self.active_session:
            self.active_session.cancelled = True
            self.active_session.active_prompt_id = None
            self._abort_event.set()
        # Reported as cancelled even when there was no active session.
        return {"cancelled": True}

    def _handle_set_mode(self, params: dict) -> dict:
        """Change session mode settings (only whitelisted keys are applied)."""
        if not self.active_session:
            return {"error": "No active session"}

        for key in ("think_level", "tool_verbosity", "reasoning", "usage_detail"):
            if key in params:
                self.active_session.mode[key] = params[key]

        return {"mode": self.active_session.mode}

    def _handle_ping(self, params: dict) -> dict:
        """Liveness check — empty result means OK."""
        return {}

    def _handle_shutdown(self, params: dict) -> dict:
        """Acknowledge shutdown; the stdio loop exits after this response."""
        logger.info("ACP shutdown requested")
        return {"ok": True}

    # ─── Brain Integration ────────────────────────────────────────

    def _process_prompt(self, session: ACPSession, text: str) -> str:
        """Send prompt to Seraph brain and get response.

        NOTE(review): `session` (and its mode settings) is not consulted yet —
        every prompt is a fresh single-turn LLM call with no history.
        """
        try:
            import os
            # Make the package root importable so "agent.llm" resolves.
            sys.path.insert(0, str(Path(__file__).parent.parent))
            from agent.llm import LLMProvider

            provider = os.environ.get("SERAPH_PROVIDER", "anthropic")
            model = os.environ.get("SERAPH_MODEL", "claude-sonnet-4-6")

            llm = LLMProvider(provider=provider, model=model)

            # Load identity
            identity = "You are Seraph, a coding AI agent."
            soul_path = SERAPH_DIR / "SOUL.md"
            if soul_path.exists():
                identity = soul_path.read_text()

            # Simple call for now — full tool loop integration TODO
            response = llm.chat(
                messages=[{"role": "user", "content": text}],
                system=identity,
            )
            return response.text

        except Exception as e:
            # Surface the failure as the response text rather than crashing.
            logger.error(f"Brain call failed: {e}")
            return f"Error: {e}"

    # ─── Event Emission ───────────────────────────────────────────

    def _emit_event(self, event_type: str, data: dict):
        """Send an event notification to the IDE (no "id" — no reply expected)."""
        event = {
            "jsonrpc": "2.0",
            "method": f"notifications/{event_type}",
            "params": data,
        }
        self._write(event)

    # ─── Protocol Helpers ─────────────────────────────────────────

    def _respond(self, msg_id: Any, result: dict) -> dict:
        """Build a JSON-RPC success envelope."""
        return {"jsonrpc": "2.0", "id": msg_id, "result": result}

    def _error(self, msg_id: Any, code: int, message: str) -> dict:
        """Build a JSON-RPC error envelope."""
        return {"jsonrpc": "2.0", "id": msg_id, "error": {"code": code, "message": message}}

    def _write(self, data: dict):
        """Write NDJSON to stdout."""
        sys.stdout.write(json.dumps(data) + "\n")
        sys.stdout.flush()

    # ─── Main Loop ────────────────────────────────────────────────

    def run_stdio(self):
        """Run ACP server over stdio: one JSON message per input line, until EOF or shutdown."""
        logger.info("Seraph ACP server starting (stdio)")

        for line in sys.stdin:
            line = line.strip()
            if not line:
                continue

            try:
                message = json.loads(line)
            except json.JSONDecodeError:
                # Malformed input lines are silently dropped.
                continue

            response = self.handle_message(message)
            if response is not None:
                self._write(response)

            # Check for shutdown
            if message.get("method") == "shutdown":
                break
|
|
367
|
+
|
|
368
|
+
|
|
369
|
+
def main():
    """Entry point: log to stderr (stdout carries protocol traffic) and serve ACP over stdio."""
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s [%(name)s] %(message)s",
        stream=sys.stderr,
    )
    ACPServer().run_stdio()


if __name__ == "__main__":
    main()
|
brain/agent/__init__.py
ADDED
|
File without changes
|
|
@@ -0,0 +1,218 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Seraph Ambient Learning — learn from user behavior without being asked.
|
|
3
|
+
|
|
4
|
+
Tracks silently:
|
|
5
|
+
- Which responses get retried (user didn't like it)
|
|
6
|
+
- Which tool calls get undone (wrong action)
|
|
7
|
+
- Which corrections the user makes ("no, I meant X")
|
|
8
|
+
- Response length preferences (do they prefer terse or detailed?)
|
|
9
|
+
- Time-of-day patterns (what do they work on when?)
|
|
10
|
+
- Command frequency (what do they do most?)
|
|
11
|
+
|
|
12
|
+
Builds a shadow profile that improves over time.
|
|
13
|
+
"""
|
|
14
|
+
|
|
15
|
+
import json
|
|
16
|
+
import time
|
|
17
|
+
import logging
|
|
18
|
+
import os
|
|
19
|
+
from typing import Dict, List, Optional
|
|
20
|
+
from pathlib import Path
|
|
21
|
+
from collections import defaultdict
|
|
22
|
+
|
|
23
|
+
logger = logging.getLogger("seraph.ambient_learning")
|
|
24
|
+
|
|
25
|
+
SERAPH_DIR = Path.home() / ".seraph"
|
|
26
|
+
LEARNING_PATH = SERAPH_DIR / "ambient_learnings.json"
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
class AmbientLearner:
    """Silently learns from user behavior patterns.

    All state lives in the ``self.data`` dict and is persisted as JSON at
    LEARNING_PATH. History lists are trimmed to fixed caps so the file
    stays small; saves are best-effort and never raise.
    """

    def __init__(self):
        # Default profile shape; _load() overlays any previously saved keys.
        self.data = {
            "corrections": [], # "No, I meant X" patterns
            "retries": [], # Messages that got /retry'd
            "undos": [], # Actions that got /undo'd
            "preferences": {}, # Detected preferences
            "tool_frequency": {}, # Which tools used most
            "time_patterns": {}, # Activity by hour
            "response_feedback": { # Implicit feedback
                "too_long": 0,
                "too_short": 0,
                "just_right": 0,
            },
            "command_history": [], # Recent commands for pattern detection
        }
        self._load()

    def _load(self):
        """Overlay previously saved learnings onto the default profile."""
        if LEARNING_PATH.exists():
            try:
                with open(LEARNING_PATH, encoding="utf-8") as f:
                    saved = json.load(f)
                    self.data.update(saved)
            except Exception:
                # Corrupt or unreadable file: silently start from defaults.
                pass

    def _save(self):
        """Persist the profile atomically (write to .tmp, then os.replace)."""
        try:
            SERAPH_DIR.mkdir(parents=True, exist_ok=True)
            tmp = str(LEARNING_PATH) + ".tmp"
            with open(tmp, "w", encoding="utf-8") as f:
                # default=str makes non-JSON types serializable (lossy).
                json.dump(self.data, f, indent=2, default=str)
            os.replace(tmp, str(LEARNING_PATH))
        except Exception as e:
            logger.debug(f"Save learnings: {e}")

    # ─── Event Tracking ──────────────────────────────────────────

    def on_message(self, user_id: str, text: str):
        """Track every incoming message for patterns.

        NOTE(review): time_patterns and command_history updates made here are
        only persisted when some other event (correction, retry, undo, or the
        periodic response-feedback save) triggers _save() — confirm intended.
        """
        import datetime
        # Bucket activity by local hour of day (key is the stringified hour).
        hour = datetime.datetime.now().hour
        hour_key = str(hour)
        self.data["time_patterns"][hour_key] = self.data["time_patterns"].get(hour_key, 0) + 1

        # Detect corrections
        corrections = [
            "no, i meant", "no i meant", "that's not what i",
            "i said", "not that", "wrong", "i wanted",
            "actually, ", "actually i", "no, ", "nope, ",
        ]
        text_lower = text.lower()
        for phrase in corrections:
            # Match at the start of the message or after a word boundary.
            if text_lower.startswith(phrase) or f" {phrase}" in text_lower:
                self.data["corrections"].append({
                    "text": text[:200],
                    "time": time.time(),
                    "user": user_id,
                })
                # Keep only the 50 most recent corrections.
                self.data["corrections"] = self.data["corrections"][-50:]
                # This also persists (_detect_preference_from_correction saves).
                self._detect_preference_from_correction(text)
                break

        # Track command usage
        if text.startswith("/"):
            cmd = text.split()[0]
            self.data["command_history"].append({"cmd": cmd, "time": time.time()})
            self.data["command_history"] = self.data["command_history"][-200:]

    def on_retry(self, user_id: str, original_message: str):
        """Track when a user retries (implies dissatisfaction)."""
        self.data["retries"].append({
            "message": original_message[:200],
            "time": time.time(),
            "user": user_id,
        })
        # Cap at the 30 most recent retries, then persist immediately.
        self.data["retries"] = self.data["retries"][-30:]
        self._save()

    def on_undo(self, user_id: str, undone_action: str):
        """Track when a user undoes an action."""
        self.data["undos"].append({
            "action": undone_action[:200],
            "time": time.time(),
            "user": user_id,
        })
        # Cap at the 30 most recent undos, then persist immediately.
        self.data["undos"] = self.data["undos"][-30:]
        self._save()

    def on_tool_call(self, tool_name: str):
        """Track tool usage frequency (in memory only; persisted on next save)."""
        self.data["tool_frequency"][tool_name] = self.data["tool_frequency"].get(tool_name, 0) + 1

    def on_response_sent(self, response_length: int, was_retry: bool = False):
        """Track response length patterns.

        A retry is treated as implicit negative feedback on the previous
        response's length (>500 chars counted as "too long").
        """
        if was_retry:
            # If they retried, the previous response was probably wrong length
            if response_length > 500:
                self.data["response_feedback"]["too_long"] += 1
            else:
                self.data["response_feedback"]["too_short"] += 1
        else:
            self.data["response_feedback"]["just_right"] += 1

        # Save periodically (every 10 events)
        total = sum(self.data["response_feedback"].values())
        if total % 10 == 0:
            self._save()

    # ─── Preference Detection ────────────────────────────────────

    def _detect_preference_from_correction(self, text: str):
        """Extract a preference from a correction (keyword matching), then persist."""
        text_lower = text.lower()

        # Length preference
        if any(w in text_lower for w in ["shorter", "brief", "concise", "terse", "less"]):
            self.data["preferences"]["response_length"] = "short"
        elif any(w in text_lower for w in ["longer", "more detail", "elaborate", "explain more"]):
            self.data["preferences"]["response_length"] = "long"

        # Format preference
        if any(w in text_lower for w in ["no emoji", "no emojis", "stop using emoji"]):
            self.data["preferences"]["emojis"] = False
        if any(w in text_lower for w in ["code only", "just the code", "skip the explanation"]):
            self.data["preferences"]["code_only"] = True
        if any(w in text_lower for w in ["don't use markdown", "plain text"]):
            self.data["preferences"]["markdown"] = False

        self._save()

    # ─── Insights ─────────────────────────────────────────────────

    def get_insights(self) -> Dict:
        """Get learned insights about the user.

        Returns a dict with explicit preferences, top-10 tools, top-5 peak
        hours, event counts, and (when feedback is lopsided 2:1) an inferred
        response-length preference.
        """
        insights = {
            "preferences": self.data.get("preferences", {}),
            "top_tools": sorted(
                self.data.get("tool_frequency", {}).items(),
                key=lambda x: -x[1]
            )[:10],
            "peak_hours": sorted(
                self.data.get("time_patterns", {}).items(),
                key=lambda x: -x[1]
            )[:5],
            "correction_count": len(self.data.get("corrections", [])),
            "retry_count": len(self.data.get("retries", [])),
            "undo_count": len(self.data.get("undos", [])),
        }

        # Response length preference from feedback
        fb = self.data.get("response_feedback", {})
        if fb.get("too_long", 0) > fb.get("too_short", 0) * 2:
            insights["inferred_preference"] = "User prefers shorter responses"
        elif fb.get("too_short", 0) > fb.get("too_long", 0) * 2:
            insights["inferred_preference"] = "User prefers detailed responses"

        return insights

    def format_for_prompt(self) -> str:
        """Format learned preferences as a prompt injection.

        Returns an empty string when no preferences have been detected, so
        callers can unconditionally append the result.
        """
        prefs = self.data.get("preferences", {})
        if not prefs:
            return ""

        lines = ["## User Preferences (learned)"]
        if prefs.get("response_length") == "short":
            lines.append("- User prefers SHORT, concise responses")
        elif prefs.get("response_length") == "long":
            lines.append("- User prefers DETAILED, thorough responses")
        if prefs.get("emojis") is False:
            lines.append("- Do NOT use emojis")
        if prefs.get("code_only"):
            lines.append("- User prefers code-only responses, skip explanations")
        if prefs.get("markdown") is False:
            lines.append("- Use plain text, not markdown")

        # Only the header with no bullets means nothing actionable — emit "".
        return "\n".join(lines) if len(lines) > 1 else ""
|
|
210
|
+
|
|
211
|
+
|
|
212
|
+
# Process-wide singleton instance, created lazily by get_ambient_learner().
_learner = None

def get_ambient_learner() -> AmbientLearner:
    """Return the shared AmbientLearner, constructing it on first use.

    Note: construction loads the persisted profile from disk (see
    AmbientLearner._load), so the first call may touch the filesystem.
    """
    global _learner
    if _learner is None:
        _learner = AmbientLearner()
    return _learner
|