aethergraph 0.1.0a1__py3-none-any.whl → 0.1.0a3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aethergraph/__init__.py +4 -10
- aethergraph/__main__.py +296 -0
- aethergraph/api/v1/__init__.py +0 -0
- aethergraph/api/v1/agents.py +46 -0
- aethergraph/api/v1/apps.py +70 -0
- aethergraph/api/v1/artifacts.py +415 -0
- aethergraph/api/v1/channels.py +89 -0
- aethergraph/api/v1/deps.py +168 -0
- aethergraph/api/v1/graphs.py +259 -0
- aethergraph/api/v1/identity.py +25 -0
- aethergraph/api/v1/memory.py +353 -0
- aethergraph/api/v1/misc.py +47 -0
- aethergraph/api/v1/pagination.py +29 -0
- aethergraph/api/v1/runs.py +568 -0
- aethergraph/api/v1/schemas.py +535 -0
- aethergraph/api/v1/session.py +323 -0
- aethergraph/api/v1/stats.py +201 -0
- aethergraph/api/v1/viz.py +152 -0
- aethergraph/config/config.py +22 -0
- aethergraph/config/loader.py +3 -2
- aethergraph/config/storage.py +209 -0
- aethergraph/contracts/__init__.py +0 -0
- aethergraph/contracts/services/__init__.py +0 -0
- aethergraph/contracts/services/artifacts.py +27 -14
- aethergraph/contracts/services/memory.py +45 -17
- aethergraph/contracts/services/metering.py +129 -0
- aethergraph/contracts/services/runs.py +50 -0
- aethergraph/contracts/services/sessions.py +87 -0
- aethergraph/contracts/services/state_stores.py +3 -0
- aethergraph/contracts/services/viz.py +44 -0
- aethergraph/contracts/storage/artifact_index.py +88 -0
- aethergraph/contracts/storage/artifact_store.py +99 -0
- aethergraph/contracts/storage/async_kv.py +34 -0
- aethergraph/contracts/storage/blob_store.py +50 -0
- aethergraph/contracts/storage/doc_store.py +35 -0
- aethergraph/contracts/storage/event_log.py +31 -0
- aethergraph/contracts/storage/vector_index.py +48 -0
- aethergraph/core/__init__.py +0 -0
- aethergraph/core/execution/forward_scheduler.py +13 -2
- aethergraph/core/execution/global_scheduler.py +21 -15
- aethergraph/core/execution/step_forward.py +10 -1
- aethergraph/core/graph/__init__.py +0 -0
- aethergraph/core/graph/graph_builder.py +8 -4
- aethergraph/core/graph/graph_fn.py +156 -15
- aethergraph/core/graph/graph_spec.py +8 -0
- aethergraph/core/graph/graphify.py +146 -27
- aethergraph/core/graph/node_spec.py +0 -2
- aethergraph/core/graph/node_state.py +3 -0
- aethergraph/core/graph/task_graph.py +39 -1
- aethergraph/core/runtime/__init__.py +0 -0
- aethergraph/core/runtime/ad_hoc_context.py +64 -4
- aethergraph/core/runtime/base_service.py +28 -4
- aethergraph/core/runtime/execution_context.py +13 -15
- aethergraph/core/runtime/graph_runner.py +222 -37
- aethergraph/core/runtime/node_context.py +510 -6
- aethergraph/core/runtime/node_services.py +12 -5
- aethergraph/core/runtime/recovery.py +15 -1
- aethergraph/core/runtime/run_manager.py +783 -0
- aethergraph/core/runtime/run_manager_local.py +204 -0
- aethergraph/core/runtime/run_registration.py +2 -2
- aethergraph/core/runtime/run_types.py +89 -0
- aethergraph/core/runtime/runtime_env.py +136 -7
- aethergraph/core/runtime/runtime_metering.py +71 -0
- aethergraph/core/runtime/runtime_registry.py +36 -13
- aethergraph/core/runtime/runtime_services.py +194 -6
- aethergraph/core/tools/builtins/toolset.py +1 -1
- aethergraph/core/tools/toolkit.py +5 -0
- aethergraph/plugins/agents/default_chat_agent copy.py +90 -0
- aethergraph/plugins/agents/default_chat_agent.py +171 -0
- aethergraph/plugins/agents/shared.py +81 -0
- aethergraph/plugins/channel/adapters/webui.py +112 -112
- aethergraph/plugins/channel/routes/webui_routes.py +367 -102
- aethergraph/plugins/channel/utils/slack_utils.py +115 -59
- aethergraph/plugins/channel/utils/telegram_utils.py +88 -47
- aethergraph/plugins/channel/websockets/weibui_ws.py +172 -0
- aethergraph/runtime/__init__.py +15 -0
- aethergraph/server/app_factory.py +196 -34
- aethergraph/server/clients/channel_client.py +202 -0
- aethergraph/server/http/channel_http_routes.py +116 -0
- aethergraph/server/http/channel_ws_routers.py +45 -0
- aethergraph/server/loading.py +117 -0
- aethergraph/server/server.py +131 -0
- aethergraph/server/server_state.py +240 -0
- aethergraph/server/start.py +227 -66
- aethergraph/server/ui_static/assets/KaTeX_AMS-Regular-BQhdFMY1.woff2 +0 -0
- aethergraph/server/ui_static/assets/KaTeX_AMS-Regular-DMm9YOAa.woff +0 -0
- aethergraph/server/ui_static/assets/KaTeX_AMS-Regular-DRggAlZN.ttf +0 -0
- aethergraph/server/ui_static/assets/KaTeX_Caligraphic-Bold-ATXxdsX0.ttf +0 -0
- aethergraph/server/ui_static/assets/KaTeX_Caligraphic-Bold-BEiXGLvX.woff +0 -0
- aethergraph/server/ui_static/assets/KaTeX_Caligraphic-Bold-Dq_IR9rO.woff2 +0 -0
- aethergraph/server/ui_static/assets/KaTeX_Caligraphic-Regular-CTRA-rTL.woff +0 -0
- aethergraph/server/ui_static/assets/KaTeX_Caligraphic-Regular-Di6jR-x-.woff2 +0 -0
- aethergraph/server/ui_static/assets/KaTeX_Caligraphic-Regular-wX97UBjC.ttf +0 -0
- aethergraph/server/ui_static/assets/KaTeX_Fraktur-Bold-BdnERNNW.ttf +0 -0
- aethergraph/server/ui_static/assets/KaTeX_Fraktur-Bold-BsDP51OF.woff +0 -0
- aethergraph/server/ui_static/assets/KaTeX_Fraktur-Bold-CL6g_b3V.woff2 +0 -0
- aethergraph/server/ui_static/assets/KaTeX_Fraktur-Regular-CB_wures.ttf +0 -0
- aethergraph/server/ui_static/assets/KaTeX_Fraktur-Regular-CTYiF6lA.woff2 +0 -0
- aethergraph/server/ui_static/assets/KaTeX_Fraktur-Regular-Dxdc4cR9.woff +0 -0
- aethergraph/server/ui_static/assets/KaTeX_Main-Bold-Cx986IdX.woff2 +0 -0
- aethergraph/server/ui_static/assets/KaTeX_Main-Bold-Jm3AIy58.woff +0 -0
- aethergraph/server/ui_static/assets/KaTeX_Main-Bold-waoOVXN0.ttf +0 -0
- aethergraph/server/ui_static/assets/KaTeX_Main-BoldItalic-DxDJ3AOS.woff2 +0 -0
- aethergraph/server/ui_static/assets/KaTeX_Main-BoldItalic-DzxPMmG6.ttf +0 -0
- aethergraph/server/ui_static/assets/KaTeX_Main-BoldItalic-SpSLRI95.woff +0 -0
- aethergraph/server/ui_static/assets/KaTeX_Main-Italic-3WenGoN9.ttf +0 -0
- aethergraph/server/ui_static/assets/KaTeX_Main-Italic-BMLOBm91.woff +0 -0
- aethergraph/server/ui_static/assets/KaTeX_Main-Italic-NWA7e6Wa.woff2 +0 -0
- aethergraph/server/ui_static/assets/KaTeX_Main-Regular-B22Nviop.woff2 +0 -0
- aethergraph/server/ui_static/assets/KaTeX_Main-Regular-Dr94JaBh.woff +0 -0
- aethergraph/server/ui_static/assets/KaTeX_Main-Regular-ypZvNtVU.ttf +0 -0
- aethergraph/server/ui_static/assets/KaTeX_Math-BoldItalic-B3XSjfu4.ttf +0 -0
- aethergraph/server/ui_static/assets/KaTeX_Math-BoldItalic-CZnvNsCZ.woff2 +0 -0
- aethergraph/server/ui_static/assets/KaTeX_Math-BoldItalic-iY-2wyZ7.woff +0 -0
- aethergraph/server/ui_static/assets/KaTeX_Math-Italic-DA0__PXp.woff +0 -0
- aethergraph/server/ui_static/assets/KaTeX_Math-Italic-flOr_0UB.ttf +0 -0
- aethergraph/server/ui_static/assets/KaTeX_Math-Italic-t53AETM-.woff2 +0 -0
- aethergraph/server/ui_static/assets/KaTeX_SansSerif-Bold-CFMepnvq.ttf +0 -0
- aethergraph/server/ui_static/assets/KaTeX_SansSerif-Bold-D1sUS0GD.woff2 +0 -0
- aethergraph/server/ui_static/assets/KaTeX_SansSerif-Bold-DbIhKOiC.woff +0 -0
- aethergraph/server/ui_static/assets/KaTeX_SansSerif-Italic-C3H0VqGB.woff2 +0 -0
- aethergraph/server/ui_static/assets/KaTeX_SansSerif-Italic-DN2j7dab.woff +0 -0
- aethergraph/server/ui_static/assets/KaTeX_SansSerif-Italic-YYjJ1zSn.ttf +0 -0
- aethergraph/server/ui_static/assets/KaTeX_SansSerif-Regular-BNo7hRIc.ttf +0 -0
- aethergraph/server/ui_static/assets/KaTeX_SansSerif-Regular-CS6fqUqJ.woff +0 -0
- aethergraph/server/ui_static/assets/KaTeX_SansSerif-Regular-DDBCnlJ7.woff2 +0 -0
- aethergraph/server/ui_static/assets/KaTeX_Script-Regular-C5JkGWo-.ttf +0 -0
- aethergraph/server/ui_static/assets/KaTeX_Script-Regular-D3wIWfF6.woff2 +0 -0
- aethergraph/server/ui_static/assets/KaTeX_Script-Regular-D5yQViql.woff +0 -0
- aethergraph/server/ui_static/assets/KaTeX_Size1-Regular-C195tn64.woff +0 -0
- aethergraph/server/ui_static/assets/KaTeX_Size1-Regular-Dbsnue_I.ttf +0 -0
- aethergraph/server/ui_static/assets/KaTeX_Size1-Regular-mCD8mA8B.woff2 +0 -0
- aethergraph/server/ui_static/assets/KaTeX_Size2-Regular-B7gKUWhC.ttf +0 -0
- aethergraph/server/ui_static/assets/KaTeX_Size2-Regular-Dy4dx90m.woff2 +0 -0
- aethergraph/server/ui_static/assets/KaTeX_Size2-Regular-oD1tc_U0.woff +0 -0
- aethergraph/server/ui_static/assets/KaTeX_Size3-Regular-CTq5MqoE.woff +0 -0
- aethergraph/server/ui_static/assets/KaTeX_Size3-Regular-DgpXs0kz.ttf +0 -0
- aethergraph/server/ui_static/assets/KaTeX_Size4-Regular-BF-4gkZK.woff +0 -0
- aethergraph/server/ui_static/assets/KaTeX_Size4-Regular-DWFBv043.ttf +0 -0
- aethergraph/server/ui_static/assets/KaTeX_Size4-Regular-Dl5lxZxV.woff2 +0 -0
- aethergraph/server/ui_static/assets/KaTeX_Typewriter-Regular-C0xS9mPB.woff +0 -0
- aethergraph/server/ui_static/assets/KaTeX_Typewriter-Regular-CO6r4hn1.woff2 +0 -0
- aethergraph/server/ui_static/assets/KaTeX_Typewriter-Regular-D3Ib7_Hf.ttf +0 -0
- aethergraph/server/ui_static/assets/index-BR5GtXcZ.css +1 -0
- aethergraph/server/ui_static/assets/index-CQ0HZZ83.js +400 -0
- aethergraph/server/ui_static/index.html +15 -0
- aethergraph/server/ui_static/logo.png +0 -0
- aethergraph/services/artifacts/__init__.py +0 -0
- aethergraph/services/artifacts/facade.py +1239 -132
- aethergraph/services/auth/{dev.py → authn.py} +0 -8
- aethergraph/services/auth/authz.py +100 -0
- aethergraph/services/channel/__init__.py +0 -0
- aethergraph/services/channel/channel_bus.py +19 -1
- aethergraph/services/channel/factory.py +13 -1
- aethergraph/services/channel/ingress.py +311 -0
- aethergraph/services/channel/queue_adapter.py +75 -0
- aethergraph/services/channel/session.py +502 -19
- aethergraph/services/container/default_container.py +122 -43
- aethergraph/services/continuations/continuation.py +6 -0
- aethergraph/services/continuations/stores/fs_store.py +19 -0
- aethergraph/services/eventhub/event_hub.py +76 -0
- aethergraph/services/kv/__init__.py +0 -0
- aethergraph/services/kv/ephemeral.py +244 -0
- aethergraph/services/llm/__init__.py +0 -0
- aethergraph/services/llm/generic_client copy.py +691 -0
- aethergraph/services/llm/generic_client.py +1288 -187
- aethergraph/services/llm/providers.py +3 -1
- aethergraph/services/llm/types.py +47 -0
- aethergraph/services/llm/utils.py +284 -0
- aethergraph/services/logger/std.py +3 -0
- aethergraph/services/mcp/__init__.py +9 -0
- aethergraph/services/mcp/http_client.py +38 -0
- aethergraph/services/mcp/service.py +225 -1
- aethergraph/services/mcp/stdio_client.py +41 -6
- aethergraph/services/mcp/ws_client.py +44 -2
- aethergraph/services/memory/__init__.py +0 -0
- aethergraph/services/memory/distillers/llm_long_term.py +234 -0
- aethergraph/services/memory/distillers/llm_meta_summary.py +398 -0
- aethergraph/services/memory/distillers/long_term.py +225 -0
- aethergraph/services/memory/facade/__init__.py +3 -0
- aethergraph/services/memory/facade/chat.py +440 -0
- aethergraph/services/memory/facade/core.py +447 -0
- aethergraph/services/memory/facade/distillation.py +424 -0
- aethergraph/services/memory/facade/rag.py +410 -0
- aethergraph/services/memory/facade/results.py +315 -0
- aethergraph/services/memory/facade/retrieval.py +139 -0
- aethergraph/services/memory/facade/types.py +77 -0
- aethergraph/services/memory/facade/utils.py +43 -0
- aethergraph/services/memory/facade_dep.py +1539 -0
- aethergraph/services/memory/factory.py +9 -3
- aethergraph/services/memory/utils.py +10 -0
- aethergraph/services/metering/eventlog_metering.py +470 -0
- aethergraph/services/metering/noop.py +25 -4
- aethergraph/services/rag/__init__.py +0 -0
- aethergraph/services/rag/facade.py +279 -23
- aethergraph/services/rag/index_factory.py +2 -2
- aethergraph/services/rag/node_rag.py +317 -0
- aethergraph/services/rate_limit/inmem_rate_limit.py +24 -0
- aethergraph/services/registry/__init__.py +0 -0
- aethergraph/services/registry/agent_app_meta.py +419 -0
- aethergraph/services/registry/registry_key.py +1 -1
- aethergraph/services/registry/unified_registry.py +74 -6
- aethergraph/services/scope/scope.py +159 -0
- aethergraph/services/scope/scope_factory.py +164 -0
- aethergraph/services/state_stores/serialize.py +5 -0
- aethergraph/services/state_stores/utils.py +2 -1
- aethergraph/services/viz/__init__.py +0 -0
- aethergraph/services/viz/facade.py +413 -0
- aethergraph/services/viz/viz_service.py +69 -0
- aethergraph/storage/artifacts/artifact_index_jsonl.py +180 -0
- aethergraph/storage/artifacts/artifact_index_sqlite.py +426 -0
- aethergraph/storage/artifacts/cas_store.py +422 -0
- aethergraph/storage/artifacts/fs_cas.py +18 -0
- aethergraph/storage/artifacts/s3_cas.py +14 -0
- aethergraph/storage/artifacts/utils.py +124 -0
- aethergraph/storage/blob/fs_blob.py +86 -0
- aethergraph/storage/blob/s3_blob.py +115 -0
- aethergraph/storage/continuation_store/fs_cont.py +283 -0
- aethergraph/storage/continuation_store/inmem_cont.py +146 -0
- aethergraph/storage/continuation_store/kvdoc_cont.py +261 -0
- aethergraph/storage/docstore/fs_doc.py +63 -0
- aethergraph/storage/docstore/sqlite_doc.py +31 -0
- aethergraph/storage/docstore/sqlite_doc_sync.py +90 -0
- aethergraph/storage/eventlog/fs_event.py +136 -0
- aethergraph/storage/eventlog/sqlite_event.py +47 -0
- aethergraph/storage/eventlog/sqlite_event_sync.py +178 -0
- aethergraph/storage/factory.py +432 -0
- aethergraph/storage/fs_utils.py +28 -0
- aethergraph/storage/graph_state_store/state_store.py +64 -0
- aethergraph/storage/kv/inmem_kv.py +103 -0
- aethergraph/storage/kv/layered_kv.py +52 -0
- aethergraph/storage/kv/sqlite_kv.py +39 -0
- aethergraph/storage/kv/sqlite_kv_sync.py +98 -0
- aethergraph/storage/memory/event_persist.py +68 -0
- aethergraph/storage/memory/fs_persist.py +118 -0
- aethergraph/{services/memory/hotlog_kv.py → storage/memory/hotlog.py} +8 -2
- aethergraph/{services → storage}/memory/indices.py +31 -7
- aethergraph/storage/metering/meter_event.py +55 -0
- aethergraph/storage/runs/doc_store.py +280 -0
- aethergraph/storage/runs/inmen_store.py +82 -0
- aethergraph/storage/runs/sqlite_run_store.py +403 -0
- aethergraph/storage/sessions/doc_store.py +183 -0
- aethergraph/storage/sessions/inmem_store.py +110 -0
- aethergraph/storage/sessions/sqlite_session_store.py +399 -0
- aethergraph/storage/vector_index/chroma_index.py +138 -0
- aethergraph/storage/vector_index/faiss_index.py +179 -0
- aethergraph/storage/vector_index/sqlite_index.py +187 -0
- {aethergraph-0.1.0a1.dist-info → aethergraph-0.1.0a3.dist-info}/METADATA +138 -31
- aethergraph-0.1.0a3.dist-info/RECORD +356 -0
- aethergraph-0.1.0a3.dist-info/entry_points.txt +3 -0
- aethergraph/services/artifacts/factory.py +0 -35
- aethergraph/services/artifacts/fs_store.py +0 -656
- aethergraph/services/artifacts/jsonl_index.py +0 -123
- aethergraph/services/artifacts/sqlite_index.py +0 -209
- aethergraph/services/memory/distillers/episode.py +0 -116
- aethergraph/services/memory/distillers/rolling.py +0 -74
- aethergraph/services/memory/facade.py +0 -633
- aethergraph/services/memory/persist_fs.py +0 -40
- aethergraph/services/rag/index/base.py +0 -27
- aethergraph/services/rag/index/faiss_index.py +0 -121
- aethergraph/services/rag/index/sqlite_index.py +0 -134
- aethergraph-0.1.0a1.dist-info/RECORD +0 -182
- aethergraph-0.1.0a1.dist-info/entry_points.txt +0 -2
- {aethergraph-0.1.0a1.dist-info → aethergraph-0.1.0a3.dist-info}/WHEEL +0 -0
- {aethergraph-0.1.0a1.dist-info → aethergraph-0.1.0a3.dist-info}/licenses/LICENSE +0 -0
- {aethergraph-0.1.0a1.dist-info → aethergraph-0.1.0a3.dist-info}/licenses/NOTICE +0 -0
- {aethergraph-0.1.0a1.dist-info → aethergraph-0.1.0a3.dist-info}/top_level.txt +0 -0
aethergraph/services/memory/distillers/llm_meta_summary.py
@@ -0,0 +1,398 @@
+from __future__ import annotations
+
+from collections.abc import Iterable
+import json
+from typing import Any
+
+from aethergraph.contracts.services.llm import LLMClientProtocol
+from aethergraph.contracts.services.memory import Distiller, Event, HotLog, Indices, Persistence
+from aethergraph.contracts.storage.doc_store import DocStore
+from aethergraph.core.runtime.runtime_metering import current_meter_context, current_metering
+from aethergraph.services.memory.distillers.long_term import ar_summary_uri
+from aethergraph.services.memory.facade.utils import now_iso, stable_event_id
+from aethergraph.services.memory.utils import _summary_doc_id, _summary_prefix
+
+"""
+Meta-summary pipeline (multi-scale memory):
+
+1) Raw events (chat_user / chat_assistant) are recorded via `mem.record(...)`.
+2) `mem.distill_long_term(...)` compresses recent events into JSON summaries under:
+       mem/<scope_id>/summaries/<summary_tag>/...
+   e.g. summary_tag="session" → session-level long-term summaries.
+3) `mem.distill_meta_summary(...)` loads those saved summaries from disk and asks the LLM
+   to produce a higher-level "summary of summaries" (meta summary), written under:
+       mem/<scope_id>/summaries/<meta_tag>/...
+
+ASCII view:
+
+    [events in HotLog + Persistence]
+                  │
+                  ▼
+        distill_long_term(...)
+                  │
+                  ▼
+    file://mem/<scope>/summaries/session/*.json   (long_term_summary)
+                  │
+                  ▼
+        distill_meta_summary(...)
+                  │
+                  ▼
+    file://mem/<scope>/summaries/meta/*.json      (meta_summary: summary of summaries)
+
+You control time scales via `summary_tag` (e.g. "session", "weekly", "meta") and
+`scope_id` (e.g. user+persona).
+"""
+
+
+class LLMMetaSummaryDistiller(Distiller):
+    """
+    LLM-based "summary of summaries" distiller.
+
+    Intended use:
+    - Input: previously generated summary Events (e.g. kind="long_term_summary").
+    - Output: higher-level meta summary (e.g. kind="meta_summary") for a broader time scale.
+
+    Example:
+    - Source: summary_tag="session" (daily/session summaries)
+    - Target: summary_tag="meta" (multi-session / weekly/monthly view)
+    """
+
+    def __init__(
+        self,
+        *,
+        llm: LLMClientProtocol,
+        # Source summaries (what we are compressing)
+        source_kind: str = "long_term_summary",
+        source_tag: str = "session",
+        # Target summary (what we produce)
+        summary_kind: str = "meta_summary",
+        summary_tag: str = "meta",
+        max_summaries: int = 20,
+        min_signal: float = 0.0,
+        model: str | None = None,
+    ):
+        self.llm = llm
+        self.source_kind = source_kind
+        self.source_tag = source_tag
+        self.summary_kind = summary_kind
+        self.summary_tag = summary_tag
+        self.max_summaries = max_summaries
+        self.min_signal = min_signal
+        self.model = model  # optional model override
+
+    def _filter_source_summaries(self, events: Iterable[Event]) -> list[Event]:
+        """
+        Keep only summary Events matching:
+        - kind == source_kind
+        - tags include source_tag (and ideally 'summary')
+        - signal >= min_signal
+        """
+        out: list[Event] = []
+        for e in events:
+            if e.kind != self.source_kind:
+                continue
+            if (e.signal or 0.0) < self.min_signal:
+                continue
+            tags = set(e.tags or [])
+            if self.source_tag and self.source_tag not in tags:
+                continue
+            # Optional, but helps avoid mixing random summaries:
+            # require generic "summary" tag if present in your existing pipeline.
+            # if "summary" not in tags:
+            #     continue
+            out.append(e)
+        return out
+
+    def _build_prompt(self, summaries: list[Event]) -> list[dict[str, str]]:
+        """
+        Convert summary Events into a chat prompt for the LLM.
+
+        We use:
+        - e.text as the main human-readable summary preview.
+        - e.data.get("time_window") if present.
+        """
+
+        lines: list[str] = []
+
+        for idx, e in enumerate(summaries, start=1):
+            tw = (e.data or {}).get("time_window") if e.data else None
+            tw_from = (tw or {}).get("from", e.ts)
+            tw_to = (tw or {}).get("to", e.ts)
+            body = e.text or ""
+            lines.append(f"Summary {idx} [{tw_from} → {tw_to}]:\n{body}\n")
+
+        transcript = "\n\n".join(lines)
+
+        system = (
+            "You are a higher-level summarizer over an agent's existing summaries. "
+            "Given multiple prior summaries (each covering a period of time), you "
+            "should produce a concise, higher-level meta-summary capturing: "
+            " - long-term themes and patterns, "
+            " - important user facts that remain true, "
+            " - long-running goals or open loops."
+        )
+
+        user = (
+            "Here are several previous summaries, each describing a time window:"
+            "\n\n"
+            f"{transcript}\n\n"
+            "Return a JSON object with keys: "
+            "`summary` (string), "
+            "`key_facts` (list of strings), "
+            "`open_loops` (list of strings). "
+            "Do not use markdown or include explanations outside the JSON."
+        )
+
+        return [
+            {"role": "system", "content": system},
+            {"role": "user", "content": user},
+        ]
+
+    def _build_prompt_from_saved(self, summaries: list[dict[str, Any]]) -> list[dict[str, str]]:
+        """
+        Build an LLM prompt from persisted summary JSONs.
+
+        Each summary dict is the JSON you showed:
+        {
+            "type": "long_term_summary",
+            "summary_tag": "session",
+            "summary": "...",
+            "time_window": {...},
+            ...
+        }
+        """
+        lines: list[str] = []
+
+        for idx, s in enumerate(summaries, start=1):
+            tw = s.get("time_window") or {}
+            tw_from = tw.get("from", s.get("ts"))
+            tw_to = tw.get("to", s.get("ts"))
+            body = s.get("summary", "") or ""
+
+            # (Optional) strip ```json fences if present
+            stripped = body.strip()
+            if stripped.startswith("```"):
+                # very minimal fence strip; you can refine later
+                stripped = stripped.strip("`")
+                # fall back to original if this gets too messy
+                body_for_prompt = stripped or body
+            else:
+                body_for_prompt = body
+
+            lines.append(f"Summary {idx} [{tw_from} → {tw_to}]:\n{body_for_prompt}\n")
+
+        transcript = "\n\n".join(lines)
+
+        system = (
+            "You are a higher-level summarizer over an agent's existing long-term summaries. "
+            "Given multiple prior summaries (each describing a period), produce a meta-summary "
+            "that captures long-term themes, stable user facts, and persistent open loops."
+        )
+
+        user = (
+            "Here are several previous summaries:\n\n"
+            f"{transcript}\n\n"
+            "Return a JSON object with keys: "
+            "`summary` (string), "
+            "`key_facts` (list of strings), "
+            "`open_loops` (list of strings). "
+            "Do not include any extra explanation outside the JSON."
+        )
+
+        return [
+            {"role": "system", "content": system},
+            {"role": "user", "content": user},
+        ]
+
+    async def distill(
+        self,
+        run_id: str,
+        timeline_id: str,
+        scope_id: str = None,
+        *,
+        hotlog: HotLog,
+        persistence: Persistence,
+        indices: Indices,
+        docs: DocStore,
+        **kw: Any,
+    ) -> dict[str, Any]:
+        """
+        Distill method following the Distiller protocol.
+
+        IMPORTANT:
+        - This implementation is optimized for FSPersistence and reads
+          previously saved summary JSONs from:
+              mem/<scope_id>/summaries/<source_tag>/*.json
+        - If a different Persistence is used, we currently bail out.
+        """
+        scope = scope_id or run_id
+        prefix = _summary_prefix(scope, self.source_tag)
+
+        # 1) Load existing long-term summary JSONs from DocStore
+        try:
+            all_ids = await docs.list()
+        except Exception:
+            all_ids = []
+
+        candidates = sorted(d for d in all_ids if d.startswith(prefix))
+        if not candidates:
+            return {}
+
+        chosen_ids = candidates[-self.max_summaries :]
+        summaries: list[dict[str, Any]] = []
+        for doc_id in chosen_ids:
+            try:
+                doc = await docs.get(doc_id)
+                if doc is not None:
+                    summaries.append(doc)  # type: ignore[arg-type]
+            except Exception:
+                continue
+
+        if not summaries:
+            return {}
+
+        # Optional: filter by min_signal if present in saved JSON
+        filtered: list[dict[str, Any]] = []
+        for s in summaries:
+            sig = (
+                float(s.get("signal", 0.0)) if isinstance(s.get("signal"), int | float) else 1.0
+            )  # default 1.0
+            if sig < self.min_signal:
+                continue
+            # Also enforce type/tag consistency:
+            if s.get("type") != self.source_kind:
+                continue
+            if s.get("summary_tag") != self.source_tag:
+                continue
+            filtered.append(s)
+
+        if not filtered:
+            return {}
+
+        # Keep order as loaded (already sorted by filename)
+        kept = filtered
+
+        # 2) Derive aggregated time window
+        first_from = None
+        last_to = None
+        for s in kept:
+            tw = s.get("time_window") or {}
+            start = tw.get("from") or s.get("ts")
+            end = tw.get("to") or s.get("ts")
+            if start:
+                first_from = start if first_from is None else min(first_from, start)
+            if end:
+                last_to = end if last_to is None else max(last_to, end)
+        if first_from is None:
+            first_from = kept[0].get("ts")
+        if last_to is None:
+            last_to = kept[-1].get("ts")
+
+        # 3) Build prompt and call LLM
+        messages = self._build_prompt_from_saved(kept)
+        summary_json_str, usage = await self.llm.chat(messages)
+
+        # 4) Parse LLM JSON response
+        try:
+            payload = json.loads(summary_json_str)
+        except Exception:
+            payload = {
+                "summary": summary_json_str,
+                "key_facts": [],
+                "open_loops": [],
+            }
+
+        ts = now_iso()
+        summary_obj = {
+            "type": self.summary_kind,
+            "version": 1,
+            "run_id": run_id,
+            "scope_id": scope,
+            "summary_tag": self.summary_tag,
+            "source_summary_kind": self.source_kind,
+            "source_summary_tag": self.source_tag,
+            "ts": ts,
+            "time_window": {"from": first_from, "to": last_to},
+            "num_source_summaries": len(kept),
+            "source_summary_uris": [
+                # reconstruct the URI pattern we originally use
+                # (this assumes summaries were written under ar_summary_uri)
+                ar_summary_uri(scope, self.source_tag, s.get("ts", ts))
+                for s in kept
+            ],
+            "summary": payload.get("summary", ""),
+            "key_facts": payload.get("key_facts", []),
+            "open_loops": payload.get("open_loops", []),
+            "llm_usage": usage,
+            "llm_model": getattr(self.llm, "model", None),
+        }
+
+        doc_id = _summary_doc_id(scope, self.summary_tag, ts)
+        await docs.put(doc_id, summary_obj)
+
+        # 5) Emit meta_summary Event
+        text = summary_obj["summary"] or ""
+        preview = text[:2000] + (" …[truncated]" if len(text) > 2000 else "")
+
+        evt = Event(
+            event_id="",
+            ts=ts,
+            run_id=run_id,
+            scope_id=scope,
+            kind=self.summary_kind,
+            stage="summary_llm_meta",
+            text=preview,
+            tags=["summary", "llm", self.summary_tag],
+            data={
+                "summary_doc_id": doc_id,
+                "summary_tag": self.summary_tag,
+                "time_window": summary_obj["time_window"],
+                "num_source_summaries": len(kept),
+                "source_summary_kind": self.source_kind,
+                "source_summary_tag": self.source_tag,
+            },
+            metrics={"num_source_summaries": len(kept)},
+            severity=2,
+            signal=0.8,
+        )
+
+        evt.event_id = stable_event_id(
+            {
+                "ts": ts,
+                "run_id": run_id,
+                "kind": self.summary_kind,
+                "summary_tag": self.summary_tag,
+                "preview": preview[:200],
+            }
+        )
+
+        await hotlog.append(timeline_id, evt, ttl_s=7 * 24 * 3600, limit=1000)
+        await persistence.append_event(timeline_id, evt)
+
+        # Metering: record summary event
+        try:
+            meter = current_metering()
+            ctx = current_meter_context.get()
+            user_id = ctx.get("user_id")
+            org_id = ctx.get("org_id")
+
+            await meter.record_event(
+                user_id=user_id,
+                org_id=org_id,
+                run_id=run_id,
+                scope_id=scope,
+                kind=f"memory.{self.summary_kind}",  # e.g. "memory.long_term_summary"
+            )
+        except Exception:
+            import logging
+
+            logger = logging.getLogger("aethergraph.services.memory.distillers.llm_meta_summary")
+            logger.error("Failed to record metering event for llm_meta_summary")
+
+        return {
+            "summary_doc_id": doc_id,
+            "summary_kind": self.summary_kind,
+            "summary_tag": self.summary_tag,
+            "time_window": summary_obj["time_window"],
+            "num_source_summaries": len(kept),
+        }
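For orientation, a minimal usage sketch of the distiller above follows; it is not part of the released diff. The helper name, the literal run/timeline/scope identifiers, and the `llm_client`, `hotlog`, `persistence`, `indices`, and `docs` variables are illustrative placeholders for whatever concrete services the runtime container supplies; only the constructor arguments, the `distill(...)` signature, and the result keys come from the hunk shown.

# Illustrative only: drive the meta-summary distiller over previously saved
# "session" long-term summaries, producing a "meta" summary of summaries.
from aethergraph.services.memory.distillers.llm_meta_summary import LLMMetaSummaryDistiller


async def roll_up_session_summaries(llm_client, hotlog, persistence, indices, docs) -> dict:
    distiller = LLMMetaSummaryDistiller(
        llm=llm_client,                   # any LLMClientProtocol implementation
        source_kind="long_term_summary",  # summaries we compress
        source_tag="session",
        summary_kind="meta_summary",      # summary-of-summaries we produce
        summary_tag="meta",
        max_summaries=20,
    )
    # run_id / timeline_id / scope_id are positional per the Distiller signature;
    # the storage backends are keyword-only. The identifiers below are made up.
    # Returns e.g. {"summary_doc_id": ..., "summary_kind": "meta_summary",
    #               "summary_tag": "meta", "time_window": {...},
    #               "num_source_summaries": N}, or {} if nothing to summarize.
    return await distiller.distill(
        "run-123",
        "timeline-123",
        "user:alice:persona:companion_v1",
        hotlog=hotlog,
        persistence=persistence,
        indices=indices,
        docs=docs,
    )

Per the module docstring, the resulting meta summary is persisted as a JSON doc in the DocStore and also surfaced as a meta_summary Event on the timeline.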
aethergraph/services/memory/distillers/long_term.py
@@ -0,0 +1,225 @@
+from __future__ import annotations
+
+from collections.abc import Iterable
+import time
+from typing import Any
+
+from aethergraph.contracts.services.memory import Distiller, Event, HotLog, Indices, Persistence
+
+# re-use stable_event_id from the MemoryFacade module
+from aethergraph.contracts.storage.doc_store import DocStore
+from aethergraph.core.runtime.runtime_metering import current_meter_context, current_metering
+from aethergraph.services.memory.facade.utils import stable_event_id
+from aethergraph.services.memory.utils import _summary_doc_id
+
+
+def _now_iso() -> str:
+    return time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
+
+
+def ar_summary_uri_by_run_id(run_id: str, tag: str, ts: str) -> str:
+    """
+    NOTE: To deprecate this function in favor of ar_summary_uri below.
+
+    Save summaries under the same base "mem/<run_id>/..." tree as append_event,
+    but using a file:// URI so FSPersistence can handle it.
+    """
+    safe_ts = ts.replace(":", "-")
+    return f"file://mem/{run_id}/summaries/{tag}/{safe_ts}.json"
+
+
+def ar_summary_uri(scope_id: str, tag: str, ts: str) -> str:
+    """
+    Scope summaries by a logical memory scope, not by run_id.
+    In simple setups, scope_id == run_id. For long-lived companions, scope_id
+    might be something like "user:zcliu:persona:companion_v1".
+    """
+    safe_ts = ts.replace(":", "-")
+    return f"file://mem/{scope_id}/summaries/{tag}/{safe_ts}.json"
+
+
+class LongTermSummarizer(Distiller):
+    """
+    Generic long-term summarizer.
+
+    Goal:
+    - Take a slice of recent events (by kind and/or tag).
+    - Build a compact textual digest plus small structured metadata.
+    - Persist the summary as JSON via Persistence.save_json(...).
+    - Emit a summary Event with kind=summary_kind and data["summary_uri"].
+
+    This does NOT call an LLM by itself; it's a structural/logical summarizer.
+    An LLM-based distiller can be layered on top later (using the same URI scheme).
+
+    Typical usage:
+    - Kinds: ["chat_user", "chat_assistant"] or app-specific kinds.
+    - Tag: "session", "daily", "episode:<id>", etc.
+    """
+
+    def __init__(
+        self,
+        *,
+        summary_kind: str = "long_term_summary",
+        summary_tag: str = "session",
+        include_kinds: list[str] | None = None,
+        include_tags: list[str] | None = None,
+        max_events: int = 200,
+        min_signal: float = 0.0,
+    ):
+        self.summary_kind = summary_kind
+        self.summary_tag = summary_tag
+        self.include_kinds = include_kinds
+        self.include_tags = include_tags
+        self.max_events = max_events
+        self.min_signal = min_signal
+
+    def _filter_events(self, events: Iterable[Event]) -> list[Event]:
+        out: list[Event] = []
+        kinds = set(self.include_kinds) if self.include_kinds else None
+        tags = set(self.include_tags) if self.include_tags else None
+
+        for e in events:
+            if kinds is not None and e.kind not in kinds:
+                continue
+            if tags is not None:
+                if not e.tags:
+                    continue
+                if not tags.issubset(set(e.tags)):
+                    continue
+            if (e.signal or 0.0) < self.min_signal:
+                continue
+            out.append(e)
+        return out
+
+    async def distill(
+        self,
+        run_id: str,
+        timeline_id: str,
+        scope_id: str = None,
+        *,
+        hotlog: HotLog,
+        persistence: Persistence,
+        indices: Indices,
+        docs: DocStore,
+        **kw: Any,
+    ) -> dict[str, Any]:
+        """
+        Steps:
+        1) Grab recent events from HotLog for this run.
+        2) Filter by kinds/tags/min_signal.
+        3) Build a digest:
+           - simple text transcript (role: text)
+           - metadata: ts range, num events
+        4) Save JSON summary via Persistence.save_json(file://...).
+        5) Log a summary Event to hotlog + persistence, with data.summary_uri.
+        """
+        # 1) fetch more than we might keep to give filter some slack
+        raw = await hotlog.recent(timeline_id, kinds=None, limit=self.max_events * 2)
+        kept = self._filter_events(raw)
+        if not kept:
+            return {}
+
+        # keep only max_events most recent
+        kept = kept[-self.max_events :]
+
+        # 2) Build digest text (simple transcript-like format)
+        lines: list[str] = []
+        src_ids: list[str] = []
+        first_ts = kept[0].ts
+        last_ts = kept[-1].ts
+
+        for e in kept:
+            role = e.stage or e.kind or "event"
+            if e.text:
+                lines.append(f"[{role}] {e.text}")
+                src_ids.append(e.event_id)
+
+        digest_text = "\n".join(lines)
+        ts = _now_iso()
+
+        # 3) Summary JSON shape
+        summary = {
+            "type": self.summary_kind,
+            "version": 1,
+            "run_id": run_id,
+            "scope_id": scope_id or run_id,
+            "summary_tag": self.summary_tag,
+            "ts": ts,
+            "time_window": {
+                "from": first_ts,
+                "to": last_ts,
+            },
+            "num_events": len(kept),
+            "source_event_ids": src_ids,
+            "text": digest_text,
+        }
+
+        # 4) Persist JSON summary via DocStore
+        scope = scope_id or run_id
+        doc_id = _summary_doc_id(scope, self.summary_tag, ts)
+        await docs.put(doc_id, summary)
+
+        # 5) Emit summary Event
+        # NOTE: we only store a preview in text and full summary in data["summary_uri"]
+        preview = digest_text[:2000] + (" …[truncated]" if len(digest_text) > 2000 else "")
+
+        evt = Event(
+            event_id="",  # fill below
+            ts=ts,
+            run_id=run_id,
+            scope_id=scope,
+            kind=self.summary_kind,
+            stage="summary",
+            text=preview,
+            tags=["summary", self.summary_tag],
+            data={
+                "summary_doc_id": doc_id,
+                "summary_tag": self.summary_tag,
+                "time_window": summary["time_window"],
+                "num_events": len(kept),
+            },
+            metrics={"num_events": len(kept)},
+            severity=1,
+            signal=0.5,
+        )
+
+        evt.event_id = stable_event_id(
+            {
+                "ts": ts,
+                "run_id": run_id,
+                "kind": self.summary_kind,
+                "summary_tag": self.summary_tag,
+                "text": preview[:200],
+            }
+        )
+
+        await hotlog.append(timeline_id, evt, ttl_s=7 * 24 * 3600, limit=1000)
+        await persistence.append_event(timeline_id, evt)
+
+        # Metering: record summary event
+        try:
+            meter = current_metering()
+            ctx = current_meter_context.get()
+            user_id = ctx.get("user_id")
+            org_id = ctx.get("org_id")
+
+            await meter.record_event(
+                user_id=user_id,
+                org_id=org_id,
+                run_id=run_id,
+                scope_id=scope,
+                kind=f"memory.{self.summary_kind}",  # e.g. "memory.long_term_summary"
+            )
+        except Exception:
+            import logging
+
+            logger = logging.getLogger("aethergraph.services.memory.distillers.long_term")
+            logger.error("Failed to record metering event for long_term_summary")
+
+        return {
+            "summary_doc_id": doc_id,
+            "summary_kind": self.summary_kind,
+            "summary_tag": self.summary_tag,
+            "time_window": summary["time_window"],
+            "num_events": len(kept),
+        }
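As a counterpart to the LLM-based distiller above, the structural summarizer can be driven the same way. The sketch below is illustrative and not part of the released diff: the helper name, the literal identifiers, and the service variables are placeholders for what the container provides; the constructor arguments and result keys mirror the code in this hunk.

# Illustrative only: produce a non-LLM "session" digest over recent chat events.
from aethergraph.services.memory.distillers.long_term import LongTermSummarizer


async def summarize_session(hotlog, persistence, indices, docs) -> dict:
    summarizer = LongTermSummarizer(
        summary_kind="long_term_summary",
        summary_tag="session",
        include_kinds=["chat_user", "chat_assistant"],  # per the class docstring
        max_events=200,
        min_signal=0.0,
    )
    # On success returns {"summary_doc_id": ..., "summary_kind": "long_term_summary",
    # "summary_tag": "session", "time_window": {...}, "num_events": N}; the summary
    # JSON is stored in the DocStore under that doc_id, and a matching summary Event
    # is appended to the hotlog and persistence layers.
    return await summarizer.distill(
        "run-123",
        "timeline-123",
        "user:alice:persona:companion_v1",
        hotlog=hotlog,
        persistence=persistence,
        indices=indices,
        docs=docs,
    )

Summaries written this way under the "session" tag are exactly what LLMMetaSummaryDistiller later rolls up into a "meta" summary of summaries.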