aethergraph 0.1.0a2__py3-none-any.whl → 0.1.0a4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aethergraph/__main__.py +3 -0
- aethergraph/api/v1/artifacts.py +23 -4
- aethergraph/api/v1/schemas.py +7 -0
- aethergraph/api/v1/session.py +123 -4
- aethergraph/config/config.py +2 -0
- aethergraph/config/search.py +49 -0
- aethergraph/contracts/services/channel.py +18 -1
- aethergraph/contracts/services/execution.py +58 -0
- aethergraph/contracts/services/llm.py +26 -0
- aethergraph/contracts/services/memory.py +10 -4
- aethergraph/contracts/services/planning.py +53 -0
- aethergraph/contracts/storage/event_log.py +8 -0
- aethergraph/contracts/storage/search_backend.py +47 -0
- aethergraph/contracts/storage/vector_index.py +73 -0
- aethergraph/core/graph/action_spec.py +76 -0
- aethergraph/core/graph/graph_fn.py +75 -2
- aethergraph/core/graph/graphify.py +74 -2
- aethergraph/core/runtime/graph_runner.py +2 -1
- aethergraph/core/runtime/node_context.py +66 -3
- aethergraph/core/runtime/node_services.py +8 -0
- aethergraph/core/runtime/run_manager.py +263 -271
- aethergraph/core/runtime/run_types.py +54 -1
- aethergraph/core/runtime/runtime_env.py +35 -14
- aethergraph/core/runtime/runtime_services.py +308 -18
- aethergraph/plugins/agents/default_chat_agent.py +266 -74
- aethergraph/plugins/agents/default_chat_agent_v2.py +487 -0
- aethergraph/plugins/channel/adapters/webui.py +69 -21
- aethergraph/plugins/channel/routes/webui_routes.py +8 -48
- aethergraph/runtime/__init__.py +12 -0
- aethergraph/server/app_factory.py +10 -1
- aethergraph/server/ui_static/assets/index-CFktGdbW.js +4913 -0
- aethergraph/server/ui_static/assets/index-DcfkFlTA.css +1 -0
- aethergraph/server/ui_static/index.html +2 -2
- aethergraph/services/artifacts/facade.py +157 -21
- aethergraph/services/artifacts/types.py +35 -0
- aethergraph/services/artifacts/utils.py +42 -0
- aethergraph/services/channel/channel_bus.py +3 -1
- aethergraph/services/channel/event_hub copy.py +55 -0
- aethergraph/services/channel/event_hub.py +81 -0
- aethergraph/services/channel/factory.py +3 -2
- aethergraph/services/channel/session.py +709 -74
- aethergraph/services/container/default_container.py +69 -7
- aethergraph/services/execution/__init__.py +0 -0
- aethergraph/services/execution/local_python.py +118 -0
- aethergraph/services/indices/__init__.py +0 -0
- aethergraph/services/indices/global_indices.py +21 -0
- aethergraph/services/indices/scoped_indices.py +292 -0
- aethergraph/services/llm/generic_client.py +342 -46
- aethergraph/services/llm/generic_embed_client.py +359 -0
- aethergraph/services/llm/types.py +3 -1
- aethergraph/services/memory/distillers/llm_long_term.py +60 -109
- aethergraph/services/memory/distillers/llm_long_term_v1.py +180 -0
- aethergraph/services/memory/distillers/llm_meta_summary.py +57 -266
- aethergraph/services/memory/distillers/llm_meta_summary_v1.py +342 -0
- aethergraph/services/memory/distillers/long_term.py +48 -131
- aethergraph/services/memory/distillers/long_term_v1.py +170 -0
- aethergraph/services/memory/facade/chat.py +18 -8
- aethergraph/services/memory/facade/core.py +159 -19
- aethergraph/services/memory/facade/distillation.py +86 -31
- aethergraph/services/memory/facade/retrieval.py +100 -1
- aethergraph/services/memory/factory.py +4 -1
- aethergraph/services/planning/__init__.py +0 -0
- aethergraph/services/planning/action_catalog.py +271 -0
- aethergraph/services/planning/bindings.py +56 -0
- aethergraph/services/planning/dependency_index.py +65 -0
- aethergraph/services/planning/flow_validator.py +263 -0
- aethergraph/services/planning/graph_io_adapter.py +150 -0
- aethergraph/services/planning/input_parser.py +312 -0
- aethergraph/services/planning/missing_inputs.py +28 -0
- aethergraph/services/planning/node_planner.py +613 -0
- aethergraph/services/planning/orchestrator.py +112 -0
- aethergraph/services/planning/plan_executor.py +506 -0
- aethergraph/services/planning/plan_types.py +321 -0
- aethergraph/services/planning/planner.py +617 -0
- aethergraph/services/planning/planner_service.py +369 -0
- aethergraph/services/planning/planning_context_builder.py +43 -0
- aethergraph/services/planning/quick_actions.py +29 -0
- aethergraph/services/planning/routers/__init__.py +0 -0
- aethergraph/services/planning/routers/simple_router.py +26 -0
- aethergraph/services/rag/facade.py +0 -3
- aethergraph/services/scope/scope.py +30 -30
- aethergraph/services/scope/scope_factory.py +15 -7
- aethergraph/services/skills/__init__.py +0 -0
- aethergraph/services/skills/skill_registry.py +465 -0
- aethergraph/services/skills/skills.py +220 -0
- aethergraph/services/skills/utils.py +194 -0
- aethergraph/storage/artifacts/artifact_index_jsonl.py +16 -10
- aethergraph/storage/artifacts/artifact_index_sqlite.py +12 -2
- aethergraph/storage/docstore/sqlite_doc_sync.py +1 -1
- aethergraph/storage/memory/event_persist.py +42 -2
- aethergraph/storage/memory/fs_persist.py +32 -2
- aethergraph/storage/search_backend/__init__.py +0 -0
- aethergraph/storage/search_backend/generic_vector_backend.py +230 -0
- aethergraph/storage/search_backend/null_backend.py +34 -0
- aethergraph/storage/search_backend/sqlite_lexical_backend.py +387 -0
- aethergraph/storage/search_backend/utils.py +31 -0
- aethergraph/storage/search_factory.py +75 -0
- aethergraph/storage/vector_index/faiss_index.py +72 -4
- aethergraph/storage/vector_index/sqlite_index.py +521 -52
- aethergraph/storage/vector_index/sqlite_index_vanila.py +311 -0
- aethergraph/storage/vector_index/utils.py +22 -0
- {aethergraph-0.1.0a2.dist-info → aethergraph-0.1.0a4.dist-info}/METADATA +1 -1
- {aethergraph-0.1.0a2.dist-info → aethergraph-0.1.0a4.dist-info}/RECORD +108 -64
- {aethergraph-0.1.0a2.dist-info → aethergraph-0.1.0a4.dist-info}/WHEEL +1 -1
- aethergraph/plugins/agents/default_chat_agent copy.py +0 -90
- aethergraph/server/ui_static/assets/index-BR5GtXcZ.css +0 -1
- aethergraph/server/ui_static/assets/index-CQ0HZZ83.js +0 -400
- aethergraph/services/eventhub/event_hub.py +0 -76
- aethergraph/services/llm/generic_client copy.py +0 -691
- aethergraph/services/prompts/file_store.py +0 -41
- {aethergraph-0.1.0a2.dist-info → aethergraph-0.1.0a4.dist-info}/entry_points.txt +0 -0
- {aethergraph-0.1.0a2.dist-info → aethergraph-0.1.0a4.dist-info}/licenses/LICENSE +0 -0
- {aethergraph-0.1.0a2.dist-info → aethergraph-0.1.0a4.dist-info}/licenses/NOTICE +0 -0
- {aethergraph-0.1.0a2.dist-info → aethergraph-0.1.0a4.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,487 @@
|
|
|
1
|
+
# aethergraph/examples/agents/default_chat_agent.py
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import asyncio
|
|
6
|
+
from contextlib import suppress
|
|
7
|
+
import time
|
|
8
|
+
from typing import Any
|
|
9
|
+
|
|
10
|
+
from aethergraph import NodeContext, graph_fn
|
|
11
|
+
|
|
12
|
+
# ---------------------------------------------------------------------------
|
|
13
|
+
# Helpers
|
|
14
|
+
# ---------------------------------------------------------------------------
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def _thread_tag(session_id: str | None) -> str | None:
|
|
18
|
+
"""
|
|
19
|
+
Your custom, stable tag contract for "this conversation thread".
|
|
20
|
+
Use something not special-cased by core memory.
|
|
21
|
+
"""
|
|
22
|
+
if not session_id:
|
|
23
|
+
return None
|
|
24
|
+
return f"thread:session:{session_id}"
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
def _semantic_chat_tags(session_id: str | None) -> list[str]:
|
|
28
|
+
"""
|
|
29
|
+
Tags that you control at agent level.
|
|
30
|
+
Keep them semantic + thread-scoped, but not provenance (run/node/channel),
|
|
31
|
+
since provenance can come from channel if you want.
|
|
32
|
+
"""
|
|
33
|
+
tags = ["user.chat"]
|
|
34
|
+
ttag = _thread_tag(session_id)
|
|
35
|
+
if ttag:
|
|
36
|
+
tags.append(ttag)
|
|
37
|
+
return tags
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
async def _get_last_seen_thread(mem) -> str | None:
|
|
41
|
+
# HotLog-only; good enough for "what was the last thread we chatted in?"
|
|
42
|
+
evts = await mem.recent(kinds=["chat.thread_seen"], limit=5)
|
|
43
|
+
for e in reversed(evts): # newest last (per your recent() contract)
|
|
44
|
+
t = (getattr(e, "data", None) or {}).get("thread")
|
|
45
|
+
if isinstance(t, str) and t:
|
|
46
|
+
return t
|
|
47
|
+
return None
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
async def _mark_thread_seen(mem, ttag: str | None) -> None:
|
|
51
|
+
if not ttag:
|
|
52
|
+
return
|
|
53
|
+
# Record as an ordinary event so it lives in user timeline
|
|
54
|
+
await mem.record(
|
|
55
|
+
kind="chat.thread_seen",
|
|
56
|
+
text=ttag,
|
|
57
|
+
data={"thread": ttag},
|
|
58
|
+
tags=["thread_state"],
|
|
59
|
+
severity=1,
|
|
60
|
+
stage="system",
|
|
61
|
+
)
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
async def _emit_handoff_capsule(mem, *, prev_ttag: str, prev_session_id: str | None = None) -> None:
|
|
65
|
+
"""
|
|
66
|
+
Create a lightweight, immediate capsule for the previous thread.
|
|
67
|
+
This is NOT long-term distill; it’s a quick “handoff”.
|
|
68
|
+
"""
|
|
69
|
+
# Pull the tail of the previous thread
|
|
70
|
+
tail = await mem.recent_chat(limit=40, tags=[prev_ttag])
|
|
71
|
+
|
|
72
|
+
if not tail:
|
|
73
|
+
return
|
|
74
|
+
|
|
75
|
+
# Ultra-cheap “summary” without an extra LLM call:
|
|
76
|
+
# keep last few user+assistant messages in compact bullets
|
|
77
|
+
lines: list[str] = []
|
|
78
|
+
for m in tail[-12:]:
|
|
79
|
+
role = m.get("role", "user")
|
|
80
|
+
text = (m.get("text") or "").strip().replace("\n", " ")
|
|
81
|
+
if not text:
|
|
82
|
+
continue
|
|
83
|
+
if len(text) > 180:
|
|
84
|
+
text = text[:180] + "…"
|
|
85
|
+
lines.append(f"{role}: {text}")
|
|
86
|
+
|
|
87
|
+
capsule = "Previous session context (most recent tail):\n" + "\n".join(lines)
|
|
88
|
+
|
|
89
|
+
# Store capsule in user-level memory (NOT thread-scoped), but tagged with prev thread
|
|
90
|
+
await mem.record(
|
|
91
|
+
kind="chat.handoff",
|
|
92
|
+
text=capsule,
|
|
93
|
+
data={"thread": prev_ttag, "session_id": prev_session_id, "text": capsule},
|
|
94
|
+
tags=["handoff", prev_ttag],
|
|
95
|
+
severity=2,
|
|
96
|
+
stage="system",
|
|
97
|
+
)
|
|
98
|
+
|
|
99
|
+
|
|
100
|
+
async def _maybe_handoff_on_thread_change(mem, *, session_id: str | None) -> str | None:
    """Emit a handoff capsule when the conversation moved to a new thread.

    Compares the current thread tag with the one last recorded via
    ``chat.thread_seen``.  On a change, a capsule for the previous thread
    is emitted (best-effort; failures are swallowed) and the previous tag
    is returned.  Returns ``None`` when there is no current tag or no
    change was detected.
    """
    current = _thread_tag(session_id)
    if not current:
        return None

    previous = await _get_last_seen_thread(mem)
    if not previous or previous == current:
        return None

    # Best-effort: a failed capsule must never break the chat turn.
    with suppress(Exception):
        await _emit_handoff_capsule(mem, prev_ttag=previous, prev_session_id=None)
    return previous
|
|
117
|
+
|
|
118
|
+
|
|
119
|
+
async def _load_recent_handoff(mem, *, limit: int = 1) -> list[str]:
|
|
120
|
+
"""
|
|
121
|
+
Pull most recent handoff capsules from user memory.
|
|
122
|
+
"""
|
|
123
|
+
data = await mem.recent_data(
|
|
124
|
+
kinds=["chat.handoff"],
|
|
125
|
+
tags=["handoff"],
|
|
126
|
+
limit=max(limit * 5, 20),
|
|
127
|
+
)
|
|
128
|
+
# recent_data returns data or text; normalize to strings
|
|
129
|
+
out: list[str] = []
|
|
130
|
+
for x in data:
|
|
131
|
+
if isinstance(x, dict):
|
|
132
|
+
t = x.get("text") or x.get("summary") or ""
|
|
133
|
+
if t:
|
|
134
|
+
out.append(str(t))
|
|
135
|
+
elif isinstance(x, str) and x.strip():
|
|
136
|
+
out.append(x.strip())
|
|
137
|
+
return out[-limit:]
|
|
138
|
+
|
|
139
|
+
|
|
140
|
+
async def _maybe_distill_session(mem, session_id: str | None) -> None:
|
|
141
|
+
"""
|
|
142
|
+
Distill a per-session/thread summary, even though overall memory scope is user-level.
|
|
143
|
+
|
|
144
|
+
We select only events from this thread via tag filtering and then write summary docs
|
|
145
|
+
under a filesystem-safe tag path.
|
|
146
|
+
"""
|
|
147
|
+
ttag = _thread_tag(session_id)
|
|
148
|
+
if not ttag:
|
|
149
|
+
return
|
|
150
|
+
|
|
151
|
+
# Pull more than needed and filter locally (keeps memory core unchanged)
|
|
152
|
+
recent = await mem.recent(kinds=["chat.turn"], limit=250)
|
|
153
|
+
recent = [e for e in recent if ttag in set(e.tags or [])]
|
|
154
|
+
|
|
155
|
+
if len(recent) < 80:
|
|
156
|
+
return
|
|
157
|
+
|
|
158
|
+
# Store per-thread summaries under a filesystem-safe summary_tag
|
|
159
|
+
safe_summary_tag = f"thread/session/{session_id}"
|
|
160
|
+
|
|
161
|
+
await mem.distill_long_term(
|
|
162
|
+
summary_tag=safe_summary_tag,
|
|
163
|
+
summary_kind="long_term_summary",
|
|
164
|
+
include_kinds=["chat.turn"],
|
|
165
|
+
include_tags=["chat", ttag], # only this thread
|
|
166
|
+
max_events=200,
|
|
167
|
+
use_llm=False,
|
|
168
|
+
)
|
|
169
|
+
|
|
170
|
+
|
|
171
|
+
def _should_search_artifacts(
|
|
172
|
+
message: str,
|
|
173
|
+
files: list[Any] | None,
|
|
174
|
+
context_refs: list[dict[str, Any]] | None,
|
|
175
|
+
) -> bool:
|
|
176
|
+
if files or context_refs:
|
|
177
|
+
return True
|
|
178
|
+
msg = (message or "").lower()
|
|
179
|
+
artifact_keywords = [
|
|
180
|
+
"file",
|
|
181
|
+
"document",
|
|
182
|
+
"doc",
|
|
183
|
+
"pdf",
|
|
184
|
+
"report",
|
|
185
|
+
"notebook",
|
|
186
|
+
"log",
|
|
187
|
+
"logs",
|
|
188
|
+
"plot",
|
|
189
|
+
"graph",
|
|
190
|
+
"artifact",
|
|
191
|
+
]
|
|
192
|
+
return any(k in msg for k in artifact_keywords)
|
|
193
|
+
|
|
194
|
+
|
|
195
|
+
def _format_search_snippets(event_results, artifact_results, max_total: int = 8) -> str:
|
|
196
|
+
lines: list[str] = []
|
|
197
|
+
|
|
198
|
+
for r in event_results:
|
|
199
|
+
meta = getattr(r, "metadata", None) or {}
|
|
200
|
+
kind = meta.get("kind", "event")
|
|
201
|
+
tags = meta.get("tags") or []
|
|
202
|
+
text = meta.get("preview") or ""
|
|
203
|
+
if not text:
|
|
204
|
+
continue
|
|
205
|
+
tag_str = f" tags={','.join(tags[:3])}" if tags else ""
|
|
206
|
+
lines.append(f"- [event:{kind}]{tag_str} {text[:220]}")
|
|
207
|
+
if len(lines) >= max_total:
|
|
208
|
+
break
|
|
209
|
+
|
|
210
|
+
if len(lines) < max_total:
|
|
211
|
+
remaining = max_total - len(lines)
|
|
212
|
+
for r in artifact_results[:remaining]:
|
|
213
|
+
meta = getattr(r, "metadata", None) or {}
|
|
214
|
+
kind = meta.get("kind", "artifact")
|
|
215
|
+
name = (
|
|
216
|
+
meta.get("filename")
|
|
217
|
+
or meta.get("name")
|
|
218
|
+
or meta.get("path")
|
|
219
|
+
or meta.get("uri")
|
|
220
|
+
or r.item_id
|
|
221
|
+
)
|
|
222
|
+
desc = meta.get("description") or meta.get("summary") or ""
|
|
223
|
+
snippet = f"{name}: {desc[:160]}" if desc else name
|
|
224
|
+
lines.append(f"- [artifact:{kind}] {snippet}")
|
|
225
|
+
|
|
226
|
+
return "\n".join(lines)
|
|
227
|
+
|
|
228
|
+
|
|
229
|
+
# ---------------------------------------------------------------------------
|
|
230
|
+
# Default chat agent (user memory + thread/session-scoped prompt)
|
|
231
|
+
# ---------------------------------------------------------------------------
|
|
232
|
+
|
|
233
|
+
|
|
234
|
+
@graph_fn(
    name="default_chat_agent_user_mem",
    inputs=["message", "files", "context_refs", "session_id", "user_meta"],
    outputs=["reply"],
    as_agent={
        "id": "chat_agent_user_mem",
        "title": "Chat",
        "short_description": "General-purpose chat agent (user memory + thread-scoped prompt).",
        "description": "Uses user-level memory across sessions; prompt history is scoped by a custom thread tag.",
        "icon": "message-circle",
        "color": "sky",
        "session_kind": "chat",
        "mode": "chat_v1",
        "memory_level": "user",  # ✅ global user memory scope
        "memory_scope": "global",
    },
)
async def default_chat_agent(
    message: str,
    files: list[Any] | None = None,
    session_id: str | None = None,
    user_meta: dict[str, Any] | None = None,
    context_refs: list[dict[str, Any]] | None = None,
    *,
    context: NodeContext,
) -> dict[str, Any]:
    """General-purpose chat agent with user-level memory.

    Prompt layers, in order: base system prompt, user-level long-term
    summary, the most recent handoff capsule (if any), the thread-scoped
    recent-chat window, and optionally retrieved event/artifact snippets.
    The user turn is recorded BEFORE the LLM call; the assistant turn is
    recorded by the stream's ``end()`` using the same tag bundle so later
    prompt filtering matches both sides of the exchange.

    Every memory/search step is best-effort: failures are logged and the
    turn continues.  ``user_meta`` is declared as an input but currently
    unused in the body.

    Returns:
        ``{"reply": <assistant text>}`` — empty string if streaming failed.
    """
    logger = context.logger()
    llm = context.llm()
    chan = context.ui_session_channel()
    mem = context.memory()
    indices = context.indices()

    ttag = _thread_tag(session_id)

    # Detect thread change and emit a capsule for the previous thread (if any).
    # Best-effort: a failed capsule never blocks the current turn.
    try:
        await _maybe_handoff_on_thread_change(mem, session_id=session_id)
    except Exception:
        logger.debug("handoff capsule failed", exc_info=True)

    # Same tag bundle is reused for both the user and assistant turns below.
    mem_tags = _semantic_chat_tags(session_id)

    # ------------------------------------------------------------------
    # 1) Layer 1 + 2:
    #    - user-level long-term summaries (cross-session)
    #    - thread/session-scoped recent chat (tag filter)
    # ------------------------------------------------------------------
    segments = await mem.build_prompt_segments(
        recent_chat_limit=20,
        include_long_term=True,
        summary_tag="user/global",  # ✅ user-level summaries
        max_summaries=3,
        include_recent_tools=False,
        # ✅ generic tag filter; only include messages from this thread
        recent_chat_tags=[ttag] if ttag else None,
    )

    long_term_summary: str = segments.get("long_term") or ""
    recent_chat: list[dict[str, Any]] = segments.get("recent_chat") or []

    # ------------------------------------------------------------------
    # 2) Prompt assembly
    # ------------------------------------------------------------------
    system_prompt = (
        "You are AetherGraph's built-in helper.\n\n"
        "You can see:\n"
        "- A long-term summary of the user (across sessions).\n"
        "- A short window of recent messages from this thread.\n"
        "- Optionally, retrieved snippets from events and artifacts.\n\n"
        "Use them to answer questions, but do not invent details.\n"
        "If unsure, say so.\n"
    )

    messages: list[dict[str, str]] = [{"role": "system", "content": system_prompt}]

    if long_term_summary:
        messages.append({"role": "system", "content": "User memory summary:\n" + long_term_summary})

    # Inject at most one handoff capsule from the previous session.
    try:
        handoffs = await _load_recent_handoff(mem, limit=1)
        if handoffs:
            messages.append(
                {
                    "role": "system",
                    "content": "Recent context from your previous session:\n" + handoffs[0],
                }
            )
    except Exception:
        logger.debug("handoff load failed", exc_info=True)

    # Replay the recent thread window; unknown roles are coerced to "assistant".
    for item in recent_chat:
        role = item.get("role") or "user"
        text = item.get("text") or ""
        mapped_role = role if role in {"user", "assistant", "system"} else "assistant"
        if text:
            messages.append({"role": mapped_role, "content": text})

    # ------------------------------------------------------------------
    # 3) Layer 3: semantic search (user scope)
    # ------------------------------------------------------------------
    search_snippet_block = ""
    try:
        scope_id = getattr(mem, "memory_scope_id", None) or None
        filters: dict[str, Any] = {}
        if scope_id:
            filters["scope_id"] = scope_id

        # Restrict retrieval to the last 90 days.
        now_ts = time.time()
        created_at_min = now_ts - 90 * 24 * 3600
        created_at_max = now_ts

        event_results = await indices.search_events(
            query=message,
            top_k=5,
            filters=filters or None,
            created_at_min=created_at_min,
            created_at_max=created_at_max,
        )

        # Artifact search only when the heuristic says it is likely useful.
        artifact_results = []
        if _should_search_artifacts(message, files, context_refs):
            artifact_results = await indices.search_artifacts(
                query=message,
                top_k=5,
                filters=filters or None,
                created_at_min=created_at_min,
                created_at_max=created_at_max,
            )

        search_snippet_block = _format_search_snippets(event_results, artifact_results)

    except Exception:
        logger.warning("default_chat_agent_user_mem: search backend error", exc_info=True)

    if search_snippet_block:
        messages.append(
            {
                "role": "system",
                "content": (
                    "Retrieved snippets that may be relevant:\n\n"
                    f"{search_snippet_block}\n\n"
                    "Ignore if irrelevant."
                ),
            }
        )

    # ------------------------------------------------------------------
    # 4) Record user turn (semantic + thread tag), then call LLM
    # ------------------------------------------------------------------
    meta_lines: list[str] = []
    if files:
        meta_lines.append(f"(User attached {len(files)} file(s).)")
    if context_refs:
        meta_lines.append(f"(User attached {len(context_refs)} context reference(s).)")

    user_content = message + ("\n\n" + "\n".join(meta_lines) if meta_lines else "")

    user_data: dict[str, Any] = {}
    if files:
        # Snapshot file metadata; attribute access is tolerant of partial objects.
        user_data["files"] = [
            {
                "id": getattr(f, "id", None),
                "name": getattr(f, "name", None),
                "mimetype": getattr(f, "mimetype", None),
                "size": getattr(f, "size", None),
                "url": getattr(f, "url", None),
                "uri": getattr(f, "uri", None),
                "extra": getattr(f, "extra", None),
            }
            for f in files
        ]
    if context_refs:
        user_data["context_refs"] = context_refs

    # Record user turn under user memory scope, but tagged by thread
    try:
        await mem.record_chat_user(
            message,
            data=user_data,
            tags=mem_tags,
        )
    except Exception:
        logger.warning("Failed to record user chat message to memory", exc_info=True)

    messages.append({"role": "user", "content": user_content})

    # ------------------------------------------------------------------
    # 5) Stream response (assistant turn uses same semantic tags)
    # ------------------------------------------------------------------
    resp = ""
    try:
        # Phase updates are cosmetic; failures are only logged at debug.
        try:
            await chan.send_phase(
                phase="reasoning",
                status="active",
                label="LLM call",
                detail="Calling LLM (streaming response)...",
            )
            await asyncio.sleep(0.2)
            await chan.send_phase(
                phase="llm",
                status="active",
                label="Generating",
                detail="LLM is generating the response...",
            )
        except Exception:
            logger.debug("Failed to send phase(active)", exc_info=True)

        async with chan.stream() as s:

            async def on_delta(piece: str) -> None:
                await s.delta(piece)

            resp, usage = await llm.chat_stream(
                messages=messages,
                on_delta=on_delta,
            )

            memory_data = {"usage": usage} if usage else None

            # IMPORTANT: use the same tag bundle so prompt filtering works
            await s.end(
                full_text=resp,
                memory_tags=mem_tags,
                memory_data=memory_data,
            )

        try:
            await chan.send_phase(
                phase="reasoning",
                status="done",
                label="LLM call",
                detail="LLM response finished.",
            )
        except Exception:
            logger.debug("Failed to send phase(done)", exc_info=True)

    except Exception:
        logger.warning("Failed to stream/log assistant reply via channel", exc_info=True)

    # ------------------------------------------------------------------
    # 6) Per-thread distillation (optional)
    # ------------------------------------------------------------------
    try:
        await _maybe_distill_session(mem, session_id=session_id)
    except Exception:
        logger.warning("Chat agent memory distill error", exc_info=True)

    # Record this thread as the last-seen one for the next handoff check.
    try:
        await _mark_thread_seen(mem, ttag)
    except Exception:
        logger.debug("thread_seen record failed", exc_info=True)

    return {"reply": resp}
|
|
@@ -7,6 +7,7 @@ import uuid
|
|
|
7
7
|
|
|
8
8
|
from aethergraph.contracts.services.channel import Button, ChannelAdapter, OutEvent
|
|
9
9
|
from aethergraph.contracts.storage.event_log import EventLog
|
|
10
|
+
from aethergraph.services.channel.event_hub import EventHub
|
|
10
11
|
from aethergraph.services.continuations.continuation import Correlator
|
|
11
12
|
|
|
12
13
|
|
|
@@ -21,6 +22,9 @@ class UIChannelEvent:
|
|
|
21
22
|
file: dict[str, Any] | None
|
|
22
23
|
meta: dict[str, Any]
|
|
23
24
|
ts: float
|
|
25
|
+
files: list[dict[str, Any]] | None = None # optional
|
|
26
|
+
rich: dict[str, Any] | None = None # optional
|
|
27
|
+
upsert_key: str | None = None # optional
|
|
24
28
|
|
|
25
29
|
|
|
26
30
|
class WebUIChannelAdapter(ChannelAdapter):
|
|
@@ -33,8 +37,37 @@ class WebUIChannelAdapter(ChannelAdapter):
|
|
|
33
37
|
|
|
34
38
|
capabilities: set[str] = {"text", "buttons", "file", "stream", "edit"}
|
|
35
39
|
|
|
36
|
-
def __init__(self, event_log: EventLog):
|
|
40
|
+
def __init__(self, event_log: EventLog, event_hub: EventHub | None = None) -> None:
|
|
37
41
|
self.event_log = event_log
|
|
42
|
+
self.event_hub = event_hub
|
|
43
|
+
|
|
44
|
+
def _normalize_ui_file(self, file_info: dict[str, Any]) -> dict[str, Any]:
|
|
45
|
+
"""
|
|
46
|
+
WebUI-only decoration for file metadata.
|
|
47
|
+
|
|
48
|
+
- If it looks like an artifact (uri or artifact_id) but has no url,
|
|
49
|
+
we build a relative content endpoint.
|
|
50
|
+
- Preserve renderer/mimetype; frontend will decide how to render.
|
|
51
|
+
"""
|
|
52
|
+
if not file_info:
|
|
53
|
+
return file_info
|
|
54
|
+
|
|
55
|
+
out: dict[str, Any] = dict(file_info)
|
|
56
|
+
|
|
57
|
+
# Prefer explicit artifact_id, but fall back to uri
|
|
58
|
+
artifact_id = out.get("artifact_id") or out.get("uri")
|
|
59
|
+
|
|
60
|
+
# Only set url if caller didn't already set one
|
|
61
|
+
if artifact_id and not out.get("url"):
|
|
62
|
+
out["url"] = f"/artifacts/{artifact_id}/content"
|
|
63
|
+
|
|
64
|
+
# Normalize naming a bit so the UI can be consistent
|
|
65
|
+
if "name" not in out and out.get("filename"):
|
|
66
|
+
out["name"] = out["filename"]
|
|
67
|
+
if "filename" not in out and out.get("name"):
|
|
68
|
+
out["filename"] = out["name"]
|
|
69
|
+
|
|
70
|
+
return out
|
|
38
71
|
|
|
39
72
|
def _extract_target(self, channel_key: str) -> tuple[str, str]:
|
|
40
73
|
"""
|
|
@@ -74,61 +107,76 @@ class WebUIChannelAdapter(ChannelAdapter):
|
|
|
74
107
|
buttons = [self._button_to_dict(b) for b in raw_buttons]
|
|
75
108
|
file_info = getattr(event, "file", None) or None
|
|
76
109
|
|
|
77
|
-
# richer event support
|
|
78
110
|
files = getattr(event, "files", None) or None
|
|
79
111
|
rich = getattr(event, "rich", None) or None
|
|
80
112
|
upsert_key = getattr(event, "upsert_key", None)
|
|
81
113
|
|
|
82
114
|
meta = event.meta or {}
|
|
83
|
-
# Agent_id
|
|
84
|
-
# prefer cononical agent_id; otherwise fall back to legacy field
|
|
85
115
|
agent_id = meta.get("agent_id") or meta.get("agent")
|
|
86
116
|
if agent_id:
|
|
87
117
|
meta["agent_id"] = agent_id
|
|
88
118
|
|
|
89
|
-
# Prefer explicit session_id / run_id from meta when present
|
|
90
119
|
session_id = meta.get("session_id")
|
|
91
120
|
run_id = meta.get("run_id")
|
|
92
121
|
|
|
93
122
|
if scope_kind == "session":
|
|
94
123
|
scope_id = session_id or target_id
|
|
95
124
|
kind = "session_chat"
|
|
96
|
-
else:
|
|
125
|
+
else:
|
|
97
126
|
scope_id = run_id or target_id
|
|
98
127
|
kind = "run_channel"
|
|
99
128
|
|
|
129
|
+
# ------------------------------------------------------------
|
|
130
|
+
# ✅ STREAMING POLICY:
|
|
131
|
+
# - Do NOT persist start/delta
|
|
132
|
+
# - Persist end as a final agent.message
|
|
133
|
+
# ------------------------------------------------------------
|
|
134
|
+
ephemeral_stream_types = {"agent.stream.start", "agent.stream.delta"}
|
|
135
|
+
is_ephemeral_stream = event.type in ephemeral_stream_types
|
|
136
|
+
is_stream_end = event.type == "agent.stream.end"
|
|
137
|
+
|
|
138
|
+
payload_type = event.type
|
|
139
|
+
payload_text = event.text
|
|
140
|
+
|
|
141
|
+
# Persist stream.end as an agent.message (final)
|
|
142
|
+
if is_stream_end:
|
|
143
|
+
payload_type = "agent.message"
|
|
144
|
+
# Mark it so UI/debug can know it came from a stream
|
|
145
|
+
meta = {**meta, "_stream_final": True, "_stream_type": "end"}
|
|
146
|
+
|
|
147
|
+
if file_info is not None:
|
|
148
|
+
file_info = self._normalize_ui_file(file_info)
|
|
149
|
+
|
|
150
|
+
if files is not None:
|
|
151
|
+
files = [self._normalize_ui_file(f) for f in files]
|
|
152
|
+
|
|
100
153
|
row = {
|
|
101
154
|
"id": str(uuid.uuid4()),
|
|
102
155
|
"ts": datetime.now(timezone.utc).timestamp(),
|
|
103
156
|
"scope_id": scope_id,
|
|
104
157
|
"kind": kind,
|
|
105
158
|
"payload": {
|
|
106
|
-
"type":
|
|
107
|
-
"text":
|
|
159
|
+
"type": payload_type,
|
|
160
|
+
"text": payload_text,
|
|
108
161
|
"buttons": buttons,
|
|
109
162
|
"file": file_info,
|
|
110
163
|
"files": files,
|
|
111
164
|
"rich": rich,
|
|
112
165
|
"upsert_key": upsert_key,
|
|
113
166
|
"meta": meta,
|
|
114
|
-
# optional convenience copy:
|
|
115
167
|
"agent_id": meta.get("agent_id"),
|
|
116
168
|
},
|
|
117
169
|
}
|
|
118
|
-
await self.event_log.append(row)
|
|
119
170
|
|
|
120
|
-
|
|
121
|
-
|
|
122
|
-
|
|
123
|
-
|
|
171
|
+
# ✅ Only persist non-ephemeral stream events
|
|
172
|
+
if not is_ephemeral_stream:
|
|
173
|
+
await self.event_log.append(row)
|
|
174
|
+
|
|
175
|
+
# ✅ Always broadcast if hub exists (so streaming works)
|
|
176
|
+
if self.event_hub is not None:
|
|
177
|
+
await self.event_hub.broadcast(row)
|
|
124
178
|
|
|
125
|
-
# Correlator remains run-based for now (session may not map 1-1)
|
|
126
179
|
return {
|
|
127
180
|
"run_id": run_id or target_id,
|
|
128
|
-
"correlator": Correlator(
|
|
129
|
-
scheme="ui",
|
|
130
|
-
channel=event.channel,
|
|
131
|
-
thread="",
|
|
132
|
-
message=None,
|
|
133
|
-
),
|
|
181
|
+
"correlator": Correlator(scheme="ui", channel=event.channel, thread="", message=None),
|
|
134
182
|
}
|