aethergraph 0.1.0a2__py3-none-any.whl → 0.1.0a4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (114)
  1. aethergraph/__main__.py +3 -0
  2. aethergraph/api/v1/artifacts.py +23 -4
  3. aethergraph/api/v1/schemas.py +7 -0
  4. aethergraph/api/v1/session.py +123 -4
  5. aethergraph/config/config.py +2 -0
  6. aethergraph/config/search.py +49 -0
  7. aethergraph/contracts/services/channel.py +18 -1
  8. aethergraph/contracts/services/execution.py +58 -0
  9. aethergraph/contracts/services/llm.py +26 -0
  10. aethergraph/contracts/services/memory.py +10 -4
  11. aethergraph/contracts/services/planning.py +53 -0
  12. aethergraph/contracts/storage/event_log.py +8 -0
  13. aethergraph/contracts/storage/search_backend.py +47 -0
  14. aethergraph/contracts/storage/vector_index.py +73 -0
  15. aethergraph/core/graph/action_spec.py +76 -0
  16. aethergraph/core/graph/graph_fn.py +75 -2
  17. aethergraph/core/graph/graphify.py +74 -2
  18. aethergraph/core/runtime/graph_runner.py +2 -1
  19. aethergraph/core/runtime/node_context.py +66 -3
  20. aethergraph/core/runtime/node_services.py +8 -0
  21. aethergraph/core/runtime/run_manager.py +263 -271
  22. aethergraph/core/runtime/run_types.py +54 -1
  23. aethergraph/core/runtime/runtime_env.py +35 -14
  24. aethergraph/core/runtime/runtime_services.py +308 -18
  25. aethergraph/plugins/agents/default_chat_agent.py +266 -74
  26. aethergraph/plugins/agents/default_chat_agent_v2.py +487 -0
  27. aethergraph/plugins/channel/adapters/webui.py +69 -21
  28. aethergraph/plugins/channel/routes/webui_routes.py +8 -48
  29. aethergraph/runtime/__init__.py +12 -0
  30. aethergraph/server/app_factory.py +10 -1
  31. aethergraph/server/ui_static/assets/index-CFktGdbW.js +4913 -0
  32. aethergraph/server/ui_static/assets/index-DcfkFlTA.css +1 -0
  33. aethergraph/server/ui_static/index.html +2 -2
  34. aethergraph/services/artifacts/facade.py +157 -21
  35. aethergraph/services/artifacts/types.py +35 -0
  36. aethergraph/services/artifacts/utils.py +42 -0
  37. aethergraph/services/channel/channel_bus.py +3 -1
  38. aethergraph/services/channel/event_hub copy.py +55 -0
  39. aethergraph/services/channel/event_hub.py +81 -0
  40. aethergraph/services/channel/factory.py +3 -2
  41. aethergraph/services/channel/session.py +709 -74
  42. aethergraph/services/container/default_container.py +69 -7
  43. aethergraph/services/execution/__init__.py +0 -0
  44. aethergraph/services/execution/local_python.py +118 -0
  45. aethergraph/services/indices/__init__.py +0 -0
  46. aethergraph/services/indices/global_indices.py +21 -0
  47. aethergraph/services/indices/scoped_indices.py +292 -0
  48. aethergraph/services/llm/generic_client.py +342 -46
  49. aethergraph/services/llm/generic_embed_client.py +359 -0
  50. aethergraph/services/llm/types.py +3 -1
  51. aethergraph/services/memory/distillers/llm_long_term.py +60 -109
  52. aethergraph/services/memory/distillers/llm_long_term_v1.py +180 -0
  53. aethergraph/services/memory/distillers/llm_meta_summary.py +57 -266
  54. aethergraph/services/memory/distillers/llm_meta_summary_v1.py +342 -0
  55. aethergraph/services/memory/distillers/long_term.py +48 -131
  56. aethergraph/services/memory/distillers/long_term_v1.py +170 -0
  57. aethergraph/services/memory/facade/chat.py +18 -8
  58. aethergraph/services/memory/facade/core.py +159 -19
  59. aethergraph/services/memory/facade/distillation.py +86 -31
  60. aethergraph/services/memory/facade/retrieval.py +100 -1
  61. aethergraph/services/memory/factory.py +4 -1
  62. aethergraph/services/planning/__init__.py +0 -0
  63. aethergraph/services/planning/action_catalog.py +271 -0
  64. aethergraph/services/planning/bindings.py +56 -0
  65. aethergraph/services/planning/dependency_index.py +65 -0
  66. aethergraph/services/planning/flow_validator.py +263 -0
  67. aethergraph/services/planning/graph_io_adapter.py +150 -0
  68. aethergraph/services/planning/input_parser.py +312 -0
  69. aethergraph/services/planning/missing_inputs.py +28 -0
  70. aethergraph/services/planning/node_planner.py +613 -0
  71. aethergraph/services/planning/orchestrator.py +112 -0
  72. aethergraph/services/planning/plan_executor.py +506 -0
  73. aethergraph/services/planning/plan_types.py +321 -0
  74. aethergraph/services/planning/planner.py +617 -0
  75. aethergraph/services/planning/planner_service.py +369 -0
  76. aethergraph/services/planning/planning_context_builder.py +43 -0
  77. aethergraph/services/planning/quick_actions.py +29 -0
  78. aethergraph/services/planning/routers/__init__.py +0 -0
  79. aethergraph/services/planning/routers/simple_router.py +26 -0
  80. aethergraph/services/rag/facade.py +0 -3
  81. aethergraph/services/scope/scope.py +30 -30
  82. aethergraph/services/scope/scope_factory.py +15 -7
  83. aethergraph/services/skills/__init__.py +0 -0
  84. aethergraph/services/skills/skill_registry.py +465 -0
  85. aethergraph/services/skills/skills.py +220 -0
  86. aethergraph/services/skills/utils.py +194 -0
  87. aethergraph/storage/artifacts/artifact_index_jsonl.py +16 -10
  88. aethergraph/storage/artifacts/artifact_index_sqlite.py +12 -2
  89. aethergraph/storage/docstore/sqlite_doc_sync.py +1 -1
  90. aethergraph/storage/memory/event_persist.py +42 -2
  91. aethergraph/storage/memory/fs_persist.py +32 -2
  92. aethergraph/storage/search_backend/__init__.py +0 -0
  93. aethergraph/storage/search_backend/generic_vector_backend.py +230 -0
  94. aethergraph/storage/search_backend/null_backend.py +34 -0
  95. aethergraph/storage/search_backend/sqlite_lexical_backend.py +387 -0
  96. aethergraph/storage/search_backend/utils.py +31 -0
  97. aethergraph/storage/search_factory.py +75 -0
  98. aethergraph/storage/vector_index/faiss_index.py +72 -4
  99. aethergraph/storage/vector_index/sqlite_index.py +521 -52
  100. aethergraph/storage/vector_index/sqlite_index_vanila.py +311 -0
  101. aethergraph/storage/vector_index/utils.py +22 -0
  102. {aethergraph-0.1.0a2.dist-info → aethergraph-0.1.0a4.dist-info}/METADATA +1 -1
  103. {aethergraph-0.1.0a2.dist-info → aethergraph-0.1.0a4.dist-info}/RECORD +108 -64
  104. {aethergraph-0.1.0a2.dist-info → aethergraph-0.1.0a4.dist-info}/WHEEL +1 -1
  105. aethergraph/plugins/agents/default_chat_agent copy.py +0 -90
  106. aethergraph/server/ui_static/assets/index-BR5GtXcZ.css +0 -1
  107. aethergraph/server/ui_static/assets/index-CQ0HZZ83.js +0 -400
  108. aethergraph/services/eventhub/event_hub.py +0 -76
  109. aethergraph/services/llm/generic_client copy.py +0 -691
  110. aethergraph/services/prompts/file_store.py +0 -41
  111. {aethergraph-0.1.0a2.dist-info → aethergraph-0.1.0a4.dist-info}/entry_points.txt +0 -0
  112. {aethergraph-0.1.0a2.dist-info → aethergraph-0.1.0a4.dist-info}/licenses/LICENSE +0 -0
  113. {aethergraph-0.1.0a2.dist-info → aethergraph-0.1.0a4.dist-info}/licenses/NOTICE +0 -0
  114. {aethergraph-0.1.0a2.dist-info → aethergraph-0.1.0a4.dist-info}/top_level.txt +0 -0
@@ -1,15 +1,124 @@
- # aethergraph/examples/agents/default_chat_agent.py (or similar)
+ # aethergraph/examples/agents/default_chat_agent.py

  from __future__ import annotations

+ import asyncio
+ import time
  from typing import Any

  from aethergraph import NodeContext, graph_fn

+ # ---------------------------------------------------------------------------
+ # Helpers
+ # ---------------------------------------------------------------------------
+
+
+ async def _maybe_distill_session(mem) -> None:
+     """
+     Simple distillation policy (Layer 2 maintenance):
+
+     - If we have "enough" chat turns, run a long-term summary.
+     - Uses the non-LLM summarizer by default (use_llm=False).
+       The summary is stored in DocStore and also recorded as a memory event
+       via `record_raw`, so it becomes searchable by indices.
+     """
+     recent_for_distill = await mem.recent_chat(limit=120)
+     if len(recent_for_distill) < 80:
+         return
+
+     await mem.distill_long_term(
+         summary_tag="session",
+         summary_kind="long_term_summary",
+         include_kinds=["chat.turn"],
+         include_tags=["chat"],
+         max_events=200,
+         use_llm=False,
+     )
+
+
+ def _should_search_artifacts(
+     message: str,
+     files: list[Any] | None,
+     context_refs: list[dict[str, Any]] | None,
+ ) -> bool:
+     """
+     Heuristic: when do we bother searching artifacts (files, reports, logs)?
+
+     - Always, if the user attached files or context refs.
+     - Otherwise, only if the message looks artifact-oriented.
+     """
+     if files or context_refs:
+         return True
+
+     msg = (message or "").lower()
+     artifact_keywords = [
+         "file",
+         "document",
+         "doc",
+         "pdf",
+         "report",
+         "notebook",
+         "log",
+         "logs",
+         "plot",
+         "graph",
+         "artifact",
+     ]
+     return any(k in msg for k in artifact_keywords)
+
+
+ def _format_search_snippets(event_results, artifact_results, max_total: int = 8) -> str:
+     """
+     Convert search hits (Layer 3) into a compact textual block
+     that the LLM can consume.
+
+     We don't try to be fancy; just short bullet lines with a bit of context.
+     """
+     lines: list[str] = []
+
+     # Events first
+     for r in event_results:
+         meta = getattr(r, "metadata", None) or {}
+         kind = meta.get("kind", "event")
+         tags = meta.get("tags") or []
+         text = meta.get("preview") or ""
+
+         if not text:
+             continue
+
+         tag_str = f" tags={','.join(tags[:3])}" if tags else ""
+         lines.append(f"- [event:{kind}]{tag_str} {text[:220]}")
+         if len(lines) >= max_total:
+             break
+
+     # Then artifacts (if we still have budget)
+     if len(lines) < max_total:
+         remaining = max_total - len(lines)
+         for r in artifact_results[:remaining]:
+             meta = getattr(r, "metadata", None) or {}
+             kind = meta.get("kind", "artifact")
+             name = (
+                 meta.get("filename")
+                 or meta.get("name")
+                 or meta.get("path")
+                 or meta.get("uri")
+                 or r.item_id
+             )
+             desc = meta.get("description") or meta.get("summary") or ""
+             snippet = f"{name}: {desc[:160]}" if desc else name
+             lines.append(f"- [artifact:{kind}] {snippet}")
+
+     return "\n".join(lines)
+
+
+ # ---------------------------------------------------------------------------
+ # Default chat agent with 3-layer memory
+ # ---------------------------------------------------------------------------
+

  @graph_fn(
      name="default_chat_agent",
-     inputs=["message", "files", "session_id", "user_meta"],
+     inputs=["message", "files", "context_refs", "session_id", "user_meta"],
      outputs=["reply"],
      as_agent={
          "id": "chat_agent",
@@ -34,30 +143,18 @@ async def default_chat_agent(
      context: NodeContext,
  ):
      """
-     Built-in chat agent with session memory:
-
-     - Hydrates long-term + recent chat memory into the prompt.
-     - Records user and assistant messages as chat.turn events.
-     - Periodically distills chat history into long-term summaries.
+     Built-in chat agent with 3-layer session memory: recency, long-term summaries, and semantic search.
      """

+     logger = context.logger()
      llm = context.llm()
      chan = context.ui_session_channel()
-
      mem = context.memory()
+     indices = context.indices()  # ScopedIndices

-     # 1) Build memory segments for this session
-     long_term_summary: str = ""
-     recent_chat: list[dict[str, Any]] = []
-
-     """
-     Build prompt segments:
-     {
-       "long_term": "<combined summary text or ''>",
-       "recent_chat": [ {ts, role, text, tags}, ... ],
-       "recent_tools": [ {ts, tool, message, inputs, outputs, tags}, ... ]
-     }
-     """
+     # ------------------------------------------------------------------
+     # 1) Layer 1 + 2: recency + long-term summaries
+     # ------------------------------------------------------------------
      segments = await mem.build_prompt_segments(
          recent_chat_limit=20,
          include_long_term=True,
@@ -65,23 +162,28 @@ async def default_chat_agent(
          max_summaries=3,
          include_recent_tools=False,
      )
-     long_term_summary = segments.get("long_term") or ""
-     recent_chat = segments.get("recent_chat") or []

-     # 2) System prompt
+     long_term_summary: str = segments.get("long_term") or ""
+     recent_chat: list[dict[str, Any]] = segments.get("recent_chat") or []
+
+     # ------------------------------------------------------------------
+     # 2) Base system prompt + memory-conditioned history
+     # ------------------------------------------------------------------
      system_prompt = (
          "You are AetherGraph's built-in session helper.\n\n"
-         "You can see a summary of the session and some recent messages.\n"
-         "Use them to answer questions about previous steps or runs, but do not invent details.\n"
+         "You can see:\n"
+         "- A long-term summary of the session (distilled from prior turns).\n"
+         "- A short window of recent chat messages.\n"
+         "- Optionally, semantically retrieved snippets from past events "
+         " and artifacts.\n\n"
+         "Use them to answer questions about previous steps or runs, "
+         "but do not invent details.\n"
          "If you are unsure, say that clearly.\n"
-         # "When returning math or code snippets, use markdown formatting.\n"
      )

-     messages: list[dict[str, str]] = [
-         {"role": "system", "content": system_prompt},
-     ]
+     messages: list[dict[str, str]] = [{"role": "system", "content": system_prompt}]

-     # Inject long-term summary as a system message (if present)
+     # Long-term summary (Layer 2 as plain text)
      if long_term_summary:
          messages.append(
              {
@@ -90,82 +192,172 @@ async def default_chat_agent(
              }
          )

-     # Inject recent chat as prior turns
+     # Recent chat turns (Layer 1)
      for item in recent_chat:
          role = item.get("role") or "user"
          text = item.get("text") or ""
-         # Map non-standard roles (e.g. "tool") to "assistant" for chat APIs
          mapped_role = role if role in {"user", "assistant", "system"} else "assistant"
          if text:
              messages.append({"role": mapped_role, "content": text})

-     # Add some lightweight metadata about files / context refs into the user message
+     # ------------------------------------------------------------------
+     # 3) Layer 3: semantic search over events + artifacts
+     # ------------------------------------------------------------------
+     search_snippet_block = ""
+     try:
+         # Scope-aware filtering: prefer this memory scope if present
+         scope_id = getattr(mem, "memory_scope_id", None) or None
+         filters: dict[str, Any] = {}
+         if scope_id:
+             filters["scope_id"] = scope_id
+
+         now_ts = time.time()
+         # Example: look back up to ~90 days. You can adjust this.
+         created_at_min = now_ts - 90 * 24 * 3600
+         created_at_max = now_ts
+
+         # Always search events with the user's message as query (cheap, high value).
+         event_results = await indices.search_events(
+             query=message,
+             top_k=5,
+             filters=filters or None,
+             created_at_min=created_at_min,
+             created_at_max=created_at_max,
+         )
+
+         # Search artifacts only when the message/files/context suggests it.
+         artifact_results = []
+         if _should_search_artifacts(message, files, context_refs):
+             artifact_results = await indices.search_artifacts(
+                 query=message,
+                 top_k=5,
+                 filters=filters or None,
+                 created_at_min=created_at_min,
+                 created_at_max=created_at_max,
+             )
+
+         search_snippet_block = _format_search_snippets(event_results, artifact_results)
+
+     except Exception:
+         # If the search backend is misconfigured or fails, do not break chat.
+         logger.warning("default_chat_agent: search backend error", exc_info=True)
+         search_snippet_block = ""
+
+     if search_snippet_block:
+         messages.append(
+             {
+                 "role": "system",
+                 "content": (
+                     "Retrieved memory snippets and artifacts that may be relevant "
+                     "to the user's current question:\n\n"
+                     f"{search_snippet_block}\n\n"
+                     "If they are not relevant, you may ignore them."
+                 ),
+             }
+         )
+
+     # ------------------------------------------------------------------
+     # 4) Build user message (with lightweight metadata hints for LLM)
+     # ------------------------------------------------------------------
      meta_lines: list[str] = []
      if files:
          meta_lines.append(f"(User attached {len(files)} file(s).)")
      if context_refs:
          meta_lines.append(f"(User attached {len(context_refs)} context reference(s).)")
+
      meta_block = ""
      if meta_lines:
          meta_block = "\n\n" + "\n".join(meta_lines)

      user_content = f"{message}{meta_block}"

-     # 3) Record the user message into memory
+     # Record user turn into memory (this becomes part of Layer 1 + 3 later)
      user_data: dict[str, Any] = {}
      if files:
-         # Store only lightweight file metadata; avoid huge payloads
          user_data["files"] = [
-             {k: v for k, v in (f or {}).items() if k in {"name", "url", "mimetype", "size"}}
+             {
+                 "id": getattr(f, "id", None),
+                 "name": getattr(f, "name", None),
+                 "mimetype": getattr(f, "mimetype", None),
+                 "size": getattr(f, "size", None),
+                 "url": getattr(f, "url", None),
+                 "uri": getattr(f, "uri", None),
+                 "extra": getattr(f, "extra", None),
+             }
              for f in files
          ]
      if context_refs:
          user_data["context_refs"] = context_refs

-     await mem.record_chat_user(
-         message,
-         data=user_data,
-         tags=["session.chat"],
-     )
+     try:
+         await mem.record_chat_user(
+             message,
+             data=user_data,
+             tags=["session.chat"],
+         )
+     except Exception:
+         logger.warning("Failed to record user chat message to memory", exc_info=True)

-     # Append current user message to LLM prompt
+     # Append current user turn to prompt
      messages.append({"role": "user", "content": user_content})
-     # 4) Call LLM with chat-style API
-     resp, _usage = await llm.chat(
-         messages=messages,
-     )

-     # 5) Record assistant reply into memory and run simple distillation policy
      try:
-         await mem.record_chat_assistant(
-             resp,
-             tags=["session.chat"],
-         )
+         # Mark the "thinking" phase as active before calling the LLM
+         try:
+             await chan.send_phase(
+                 phase="thinking",
+                 status="active",
+                 label="LLM call",
+                 detail="Calling LLM (streaming response)...",
+             )
+
+             await asyncio.sleep(0.5)  # slight delay to ensure phase event ordering
+
+         except Exception:
+             logger.debug("Failed to send LLM phase(active) state", exc_info=True)
+
+         async with chan.stream() as s:
+             # Hook for streaming deltas into the same message
+             async def on_delta(piece: str) -> None:
+                 await s.delta(piece)

-         # Simple distillation policy:
-         # If we have "enough" chat turns in recent history, run a long-term summary.
-         recent_for_distill = await mem.recent_chat(limit=120)
-         if len(recent_for_distill) >= 80:
-             # Non-LLM summarizer by default; flip use_llm=True later.
-             await mem.distill_long_term(
-                 summary_tag="session",
-                 summary_kind="long_term_summary",
-                 include_kinds=["chat.turn"],
-                 include_tags=["chat"],
-                 max_events=200,
-                 use_llm=False,
+             # Streaming LLM call
+             resp, usage = await llm.chat_stream(
+                 messages=messages,
+                 on_delta=on_delta,
              )
-     except Exception:
-         # Memory issues should never break the chat agent
-         import traceback

-         trace = traceback.format_exc()
-         logger = context.logger()
-         logger.warning("Chat agent memory record/distill error:\n" + trace)
+             # Finalize streaming + memory
+             memory_data = {"usage": usage} if usage else None
+             await s.end(
+                 full_text=resp,
+                 memory_tags=["session.chat"],
+                 memory_data=memory_data,
+             )

-     # 6) Send reply to UI channel
-     await chan.send_text(resp)
+         # Mark the "reasoning" phase as done
+         try:
+             await chan.send_phase(
+                 phase="reasoning",
+                 status="done",
+                 label="LLM call",
+                 detail="LLM response finished.",
+             )
+         except Exception:
+             logger.debug("Failed to send LLM phase(done) state", exc_info=True)
+
+     except Exception:
+         logger.warning(
+             "Failed to stream/log assistant reply via channel",
+             exc_info=True,
+         )
+
+     # ------------------------------------------------------------------
+     # 7) Periodic long-term distillation (maintains Layer 2)
+     # ------------------------------------------------------------------
+     try:
+         await _maybe_distill_session(mem)
+     except Exception:
+         logger.warning("Chat agent memory distill error", exc_info=True)

-     return {
-         "reply": resp,
-     }
+     return {"reply": resp}
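The most visible behavioral change in this file is the reply path: 0.1.0a2 made a single `llm.chat(...)` call and pushed the result with `chan.send_text(resp)`, while 0.1.0a4 streams deltas through `chan.stream()` and finalizes with `s.end(...)`, which also records the assistant turn to memory. Below is a minimal self-contained sketch of that control flow; `FakeStream` and `fake_chat_stream` are hypothetical toy stand-ins for the aethergraph channel stream and `llm.chat_stream(on_delta=...)`, not the library's actual classes.

import asyncio
from collections.abc import Awaitable, Callable


class FakeStream:
    """Toy stand-in for the channel stream: collects deltas, then finalizes."""

    def __init__(self) -> None:
        self.parts: list[str] = []

    async def __aenter__(self) -> "FakeStream":
        return self

    async def __aexit__(self, *exc) -> None:
        return None

    async def delta(self, piece: str) -> None:
        self.parts.append(piece)  # a real channel would push this piece to the UI

    async def end(self, full_text: str) -> None:
        # finalized text should match what was streamed piece by piece
        assert full_text == "".join(self.parts)


async def fake_chat_stream(
    messages: list[dict[str, str]],
    on_delta: Callable[[str], Awaitable[None]],
) -> str:
    """Toy LLM: emit the reply token by token via on_delta, return the full text."""
    reply = "hello from the stream "
    for token in reply.split():
        await on_delta(token + " ")
    return reply


async def main() -> None:
    async with FakeStream() as s:
        # hook that forwards each delta into the open stream
        async def on_delta(piece: str) -> None:
            await s.delta(piece)

        resp = await fake_chat_stream(
            [{"role": "user", "content": "hi"}], on_delta=on_delta
        )
        await s.end(full_text=resp)  # one place to finalize UI + memory


asyncio.run(main())

The point of the context-manager shape is that the stream is opened and finalized exactly once, even though the deltas arrive inside a nested callback.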
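For reference, the prompt-assembly order the new agent settles on is: base system prompt, then the Layer 2 long-term summary, then the Layer 1 recency window, then the optional Layer 3 retrieved snippets, and finally the current user message. The sketch below condenses just that ordering, assuming the three layers were already fetched; the dict shapes mirror what `build_prompt_segments` and `_format_search_snippets` produce in the diff above, but `assemble_messages` itself is an illustrative helper, not part of the package.

from typing import Any

SYSTEM_PROMPT = "You are AetherGraph's built-in session helper."


def assemble_messages(
    long_term: str,
    recent_chat: list[dict[str, Any]],
    snippet_block: str,
    user_content: str,
) -> list[dict[str, str]]:
    """Order matters: summary and snippets ride along as extra system messages."""
    messages: list[dict[str, str]] = [{"role": "system", "content": SYSTEM_PROMPT}]

    if long_term:  # Layer 2: distilled long-term summary
        messages.append({"role": "system", "content": f"Session summary:\n{long_term}"})

    for item in recent_chat:  # Layer 1: recency window, non-chat roles mapped down
        role = item.get("role") or "user"
        text = item.get("text") or ""
        mapped = role if role in {"user", "assistant", "system"} else "assistant"
        if text:
            messages.append({"role": mapped, "content": text})

    if snippet_block:  # Layer 3: retrieved snippets, explicitly marked as optional
        messages.append(
            {"role": "system", "content": f"Possibly relevant memory:\n{snippet_block}"}
        )

    messages.append({"role": "user", "content": user_content})
    return messages


print(assemble_messages("User prefers CSV.", [{"role": "user", "text": "hi"}], "", "export the data"))

Keeping the summary and snippets as separate system messages, rather than concatenating everything into one block, lets the model (and anyone reading the transcript) see which memory layer contributed what.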