aethergraph 0.1.0a3__py3-none-any.whl → 0.1.0a4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (113)
  1. aethergraph/api/v1/artifacts.py +23 -4
  2. aethergraph/api/v1/schemas.py +7 -0
  3. aethergraph/api/v1/session.py +123 -4
  4. aethergraph/config/config.py +2 -0
  5. aethergraph/config/search.py +49 -0
  6. aethergraph/contracts/services/channel.py +18 -1
  7. aethergraph/contracts/services/execution.py +58 -0
  8. aethergraph/contracts/services/llm.py +26 -0
  9. aethergraph/contracts/services/memory.py +10 -4
  10. aethergraph/contracts/services/planning.py +53 -0
  11. aethergraph/contracts/storage/event_log.py +8 -0
  12. aethergraph/contracts/storage/search_backend.py +47 -0
  13. aethergraph/contracts/storage/vector_index.py +73 -0
  14. aethergraph/core/graph/action_spec.py +76 -0
  15. aethergraph/core/graph/graph_fn.py +75 -2
  16. aethergraph/core/graph/graphify.py +74 -2
  17. aethergraph/core/runtime/graph_runner.py +2 -1
  18. aethergraph/core/runtime/node_context.py +66 -3
  19. aethergraph/core/runtime/node_services.py +8 -0
  20. aethergraph/core/runtime/run_manager.py +263 -271
  21. aethergraph/core/runtime/run_types.py +54 -1
  22. aethergraph/core/runtime/runtime_env.py +35 -14
  23. aethergraph/core/runtime/runtime_services.py +308 -18
  24. aethergraph/plugins/agents/default_chat_agent.py +266 -74
  25. aethergraph/plugins/agents/default_chat_agent_v2.py +487 -0
  26. aethergraph/plugins/channel/adapters/webui.py +69 -21
  27. aethergraph/plugins/channel/routes/webui_routes.py +8 -48
  28. aethergraph/runtime/__init__.py +12 -0
  29. aethergraph/server/app_factory.py +3 -0
  30. aethergraph/server/ui_static/assets/index-CFktGdbW.js +4913 -0
  31. aethergraph/server/ui_static/assets/index-DcfkFlTA.css +1 -0
  32. aethergraph/server/ui_static/index.html +2 -2
  33. aethergraph/services/artifacts/facade.py +157 -21
  34. aethergraph/services/artifacts/types.py +35 -0
  35. aethergraph/services/artifacts/utils.py +42 -0
  36. aethergraph/services/channel/channel_bus.py +3 -1
  37. aethergraph/services/channel/event_hub copy.py +55 -0
  38. aethergraph/services/channel/event_hub.py +81 -0
  39. aethergraph/services/channel/factory.py +3 -2
  40. aethergraph/services/channel/session.py +709 -74
  41. aethergraph/services/container/default_container.py +69 -7
  42. aethergraph/services/execution/__init__.py +0 -0
  43. aethergraph/services/execution/local_python.py +118 -0
  44. aethergraph/services/indices/__init__.py +0 -0
  45. aethergraph/services/indices/global_indices.py +21 -0
  46. aethergraph/services/indices/scoped_indices.py +292 -0
  47. aethergraph/services/llm/generic_client.py +342 -46
  48. aethergraph/services/llm/generic_embed_client.py +359 -0
  49. aethergraph/services/llm/types.py +3 -1
  50. aethergraph/services/memory/distillers/llm_long_term.py +60 -109
  51. aethergraph/services/memory/distillers/llm_long_term_v1.py +180 -0
  52. aethergraph/services/memory/distillers/llm_meta_summary.py +57 -266
  53. aethergraph/services/memory/distillers/llm_meta_summary_v1.py +342 -0
  54. aethergraph/services/memory/distillers/long_term.py +48 -131
  55. aethergraph/services/memory/distillers/long_term_v1.py +170 -0
  56. aethergraph/services/memory/facade/chat.py +18 -8
  57. aethergraph/services/memory/facade/core.py +159 -19
  58. aethergraph/services/memory/facade/distillation.py +86 -31
  59. aethergraph/services/memory/facade/retrieval.py +100 -1
  60. aethergraph/services/memory/factory.py +4 -1
  61. aethergraph/services/planning/__init__.py +0 -0
  62. aethergraph/services/planning/action_catalog.py +271 -0
  63. aethergraph/services/planning/bindings.py +56 -0
  64. aethergraph/services/planning/dependency_index.py +65 -0
  65. aethergraph/services/planning/flow_validator.py +263 -0
  66. aethergraph/services/planning/graph_io_adapter.py +150 -0
  67. aethergraph/services/planning/input_parser.py +312 -0
  68. aethergraph/services/planning/missing_inputs.py +28 -0
  69. aethergraph/services/planning/node_planner.py +613 -0
  70. aethergraph/services/planning/orchestrator.py +112 -0
  71. aethergraph/services/planning/plan_executor.py +506 -0
  72. aethergraph/services/planning/plan_types.py +321 -0
  73. aethergraph/services/planning/planner.py +617 -0
  74. aethergraph/services/planning/planner_service.py +369 -0
  75. aethergraph/services/planning/planning_context_builder.py +43 -0
  76. aethergraph/services/planning/quick_actions.py +29 -0
  77. aethergraph/services/planning/routers/__init__.py +0 -0
  78. aethergraph/services/planning/routers/simple_router.py +26 -0
  79. aethergraph/services/rag/facade.py +0 -3
  80. aethergraph/services/scope/scope.py +30 -30
  81. aethergraph/services/scope/scope_factory.py +15 -7
  82. aethergraph/services/skills/__init__.py +0 -0
  83. aethergraph/services/skills/skill_registry.py +465 -0
  84. aethergraph/services/skills/skills.py +220 -0
  85. aethergraph/services/skills/utils.py +194 -0
  86. aethergraph/storage/artifacts/artifact_index_jsonl.py +16 -10
  87. aethergraph/storage/artifacts/artifact_index_sqlite.py +12 -2
  88. aethergraph/storage/docstore/sqlite_doc_sync.py +1 -1
  89. aethergraph/storage/memory/event_persist.py +42 -2
  90. aethergraph/storage/memory/fs_persist.py +32 -2
  91. aethergraph/storage/search_backend/__init__.py +0 -0
  92. aethergraph/storage/search_backend/generic_vector_backend.py +230 -0
  93. aethergraph/storage/search_backend/null_backend.py +34 -0
  94. aethergraph/storage/search_backend/sqlite_lexical_backend.py +387 -0
  95. aethergraph/storage/search_backend/utils.py +31 -0
  96. aethergraph/storage/search_factory.py +75 -0
  97. aethergraph/storage/vector_index/faiss_index.py +72 -4
  98. aethergraph/storage/vector_index/sqlite_index.py +521 -52
  99. aethergraph/storage/vector_index/sqlite_index_vanila.py +311 -0
  100. aethergraph/storage/vector_index/utils.py +22 -0
  101. {aethergraph-0.1.0a3.dist-info → aethergraph-0.1.0a4.dist-info}/METADATA +1 -1
  102. {aethergraph-0.1.0a3.dist-info → aethergraph-0.1.0a4.dist-info}/RECORD +107 -63
  103. {aethergraph-0.1.0a3.dist-info → aethergraph-0.1.0a4.dist-info}/WHEEL +1 -1
  104. aethergraph/plugins/agents/default_chat_agent copy.py +0 -90
  105. aethergraph/server/ui_static/assets/index-BR5GtXcZ.css +0 -1
  106. aethergraph/server/ui_static/assets/index-CQ0HZZ83.js +0 -400
  107. aethergraph/services/eventhub/event_hub.py +0 -76
  108. aethergraph/services/llm/generic_client copy.py +0 -691
  109. aethergraph/services/prompts/file_store.py +0 -41
  110. {aethergraph-0.1.0a3.dist-info → aethergraph-0.1.0a4.dist-info}/entry_points.txt +0 -0
  111. {aethergraph-0.1.0a3.dist-info → aethergraph-0.1.0a4.dist-info}/licenses/LICENSE +0 -0
  112. {aethergraph-0.1.0a3.dist-info → aethergraph-0.1.0a4.dist-info}/licenses/NOTICE +0 -0
  113. {aethergraph-0.1.0a3.dist-info → aethergraph-0.1.0a4.dist-info}/top_level.txt +0 -0
aethergraph/services/memory/facade/distillation.py

@@ -2,11 +2,8 @@ from __future__ import annotations
 
 from typing import TYPE_CHECKING, Any
 
-from aethergraph.contracts.services.memory import Event
-
 # Assuming this external util exists based on original imports
 from ..utils import _summary_prefix
-from .utils import now_iso, stable_event_id
 
 if TYPE_CHECKING:
     from .types import MemoryFacadeInterface
@@ -115,16 +112,55 @@ class DistillationMixin:
             min_signal=min_signal if min_signal is not None else self.default_signal_threshold,
         )
 
-        return await d.distill(
+        result = await d.distill(
             run_id=self.run_id,
             timeline_id=self.timeline_id,
             scope_id=scope_id or self.memory_scope_id,
             hotlog=self.hotlog,
-            persistence=self.persistence,
-            indices=self.indices,
             docs=self.docs,
         )
 
+        # If nothing returned, return empty dict
+        if not result:
+            return {}
+
+        # Record the summary as a memory event via record_raw
+        preview = result.get("preview", "")
+        num_events = result.get("num_events", 0)
+        time_window = result.get("time_window", {})
+
+        # Use a different stage + tags depending on LLM or not
+        stage = "summary_llm" if use_llm else "summary"
+        tags = ["summary", summary_tag]
+        if use_llm:
+            tags.append("llm")
+
+        evt = await self.record_raw(
+            base={
+                "kind": summary_kind,  # e.g. "long_term_summary"
+                "stage": stage,  # "summary_llm" or "summary"
+                "tags": tags,
+                "data": {
+                    "summary_doc_id": result.get("summary_doc_id"),
+                    "summary_tag": summary_tag,
+                    "time_window": time_window,
+                    "num_events": num_events,
+                },
+                "scope_id": scope_id,
+                # run_id / graph_id / node_id / session_id / user/org/client
+                # etc. are filled in by record_raw from self.scope.
+                "severity": 2,
+                # optional: slight bias; record_raw will compute a default signal if None
+                "signal": 0.7 if use_llm else None,
+            },
+            text=preview,
+            metrics={"num_events": num_events},
+        )
+
+        # Optionally return the event_id with the result
+        result["event_id"] = evt.event_id
+        return result
+
     async def distill_meta_summary(
         self,
         scope_id: str | None = None,
@@ -217,16 +253,44 @@ class DistillationMixin:
             max_summaries=max_summaries,
             min_signal=min_signal if min_signal is not None else self.default_signal_threshold,
         )
-        return await d.distill(
+        result = await d.distill(
             run_id=self.run_id,
             timeline_id=self.timeline_id,
             scope_id=scope_id or self.memory_scope_id,
             hotlog=self.hotlog,
-            persistence=self.persistence,
-            indices=self.indices,
             docs=self.docs,
         )
 
+        # If nothing returned, return empty dict
+        if not result:
+            return {}
+        # Record the meta-summary as a memory event via record_raw
+        preview = result.get("preview", "")
+        num_summaries = result.get("num_source_summaries", 0)
+        time_window = result.get("time_window", {})
+        evt = await self.record_raw(
+            base={
+                "kind": summary_kind,  # e.g. "meta_summary"
+                "stage": "meta_summary_llm",
+                "tags": ["summary", "llm", summary_tag],
+                "data": {
+                    "summary_doc_id": result.get("summary_doc_id"),
+                    "summary_tag": summary_tag,
+                    "time_window": time_window,
+                    "num_source_summaries": num_summaries,
+                },
+                "scope_id": scope_id,
+                # run_id / graph_id / node_id / session_id / user/org/client
+                # etc. are filled in by record_raw from self.scope.
+                "severity": 2,
+                "signal": 0.8,
+            },
+            text=preview,
+            metrics={"num_source_summaries": num_summaries},
+        )
+        result["event_id"] = evt.event_id
+        return result
+
     async def load_last_summary(
         self,
         scope_id: str | None = None,
@@ -393,32 +457,23 @@ class DistillationMixin:
         if not summary:
             return None
 
-        text = summary.get("text") or ""
+        text = summary.get("text") or summary.get("summary") or ""  # try both fields
        preview = text[:2000] + (" …[truncated]" if len(text) > 2000 else "")
 
-        evt = Event(
-            scope_id=self.memory_scope_id or self.run_id,
-            event_id=stable_event_id(
-                {
-                    "ts": now_iso(),
-                    "run_id": self.run_id,
-                    "kind": f"{summary_kind}_hydrate",
-                    "summary_tag": summary_tag,
-                    "preview": preview[:200],
-                }
-            ),
-            ts=now_iso(),
-            run_id=self.run_id,
-            kind=f"{summary_kind}_hydrate",
-            stage="hydrate",
+        await self.record_raw(
+            base={
+                "kind": f"{summary_kind}_hydrate",
+                "stage": "hydrate",
+                "tags": ["summary", "hydrate", summary_tag],
+                "data": {"summary": summary},
+                "scope_id": scope_id,
+                # run_id / graph_id / node_id / session_id / user/org/client
+                # etc. are filled in by record_raw from self.scope.
+                "severity": 1,
+                "signal": 0.4,
+            },
             text=preview,
-            tags=["summary", "hydrate", summary_tag],
-            data={"summary": summary},
             metrics={"num_events": summary.get("num_events", 0)},
-            severity=1,
-            signal=0.4,
         )
 
-        await self.hotlog.append(self.timeline_id, evt, ttl_s=self.hot_ttl_s, limit=self.hot_limit)
-        await self.persistence.append_event(self.timeline_id, evt)
         return summary
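
The net effect of the hunks above: the distillers no longer receive `persistence=` / `indices=` and no longer write events themselves; the facade records each distillation result through `record_raw` instead of hand-building an `Event`. A minimal sketch of consuming the new return shape, assuming `memory` is a MemoryFacade exposing the DistillationMixin; the method name `distill_summary` and its keyword arguments are hypothetical, only the returned keys come from the hunk:

async def cite_latest_summary(memory):
    # Call shape is illustrative; only the result keys are taken from the diff.
    result = await memory.distill_summary(use_llm=True)  # hypothetical name/kwargs
    if not result:
        return None  # distiller produced nothing; the facade returns {}
    # New in 0.1.0a4: the summary is also recorded as a memory event via
    # record_raw, and its id is surfaced on the result next to the doc id.
    return result["event_id"], result.get("summary_doc_id"), result.get("preview", "")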
aethergraph/services/memory/facade/retrieval.py

@@ -1,7 +1,9 @@
 from __future__ import annotations
 
 import json
-from typing import TYPE_CHECKING, Any
+from typing import TYPE_CHECKING, Any, NamedTuple
+
+from aethergraph.contracts.storage.search_backend import ScoredItem
 
 if TYPE_CHECKING:
     from aethergraph.contracts.services.memory import Event
@@ -9,9 +11,52 @@ if TYPE_CHECKING:
     from .types import MemoryFacadeInterface
 
 
+class EventSearchResult(NamedTuple):
+    item: ScoredItem
+    event: Event | None
+
+    @property
+    def score(self) -> float:
+        return self.item.score
+
+
 class RetrievalMixin:
     """Methods for retrieving events and values."""
 
+    async def get_event(self, event_id: str) -> Event | None:
+        """
+        Retrieve a specific event by its ID.
+
+        This method fetches an event corresponding to the provided event ID.
+
+        Args:
+            event_id: The unique identifier of the event to retrieve.
+
+        Returns:
+            Event | None: The event object if found; otherwise, None.
+
+        Notes:
+            This method interacts with the underlying Persistence service to fetch
+            the event associated with the current timeline. If no event is found
+            with the given ID, it returns None.
+        """
+        # 1) Try hotlog
+        recent = await self.hotlog.recent(
+            self.timeline_id,
+            kinds=None,
+            limit=self.hot_limit,
+        )
+        for e in recent:
+            if e.event_id == event_id:
+                return e
+
+        # 2) Fallback to persistence
+        if hasattr(self.persistence, "get_events_by_ids"):
+            events = await self.persistence.get_events_by_ids(self.timeline_id, [event_id])
+            return events[0] if events else None
+
+        return None
+
     async def recent(
         self: MemoryFacadeInterface, *, kinds: list[str] | None = None, limit: int = 50
     ) -> list[Event]:
@@ -36,6 +81,21 @@ class RetrievalMixin:
         """
         return await self.hotlog.recent(self.timeline_id, kinds=kinds, limit=limit)
 
+    async def recent_events(
+        self,
+        *,
+        kinds: list[str] | None = None,
+        tags: list[str] | None = None,
+        limit: int = 50,
+        overfetch: int = 5,
+    ) -> list[Event]:
+        fetch_n = limit if not tags else max(limit * overfetch, 100)
+        evts = await self.recent(kinds=kinds, limit=fetch_n)
+        if tags:
+            want = set(tags)
+            evts = [e for e in evts if want.issubset(set(e.tags or []))]
+        return evts[-limit:]
+
     async def recent_data(
         self: MemoryFacadeInterface,
         *,
@@ -137,3 +197,42 @@ class RetrievalMixin:
         # if not (self.llm and any(e.embedding for e in events)): return lexical_hits or events
         # ... logic ...
         return lexical_hits or events
+
+    async def fetch_events_for_search_results(
+        self,
+        scored_items: list[ScoredItem],
+        corpus: str = "event",
+    ) -> list[EventSearchResult]:
+        """
+        Given a list of ScoredItems from a search, fetch the corresponding Event objects.
+        """
+
+        # Filter to event corpus
+        event_items = [item for item in scored_items if item.corpus == corpus]
+        if not event_items:
+            return []
+
+        ids = [it.item_id for it in event_items]
+
+        # 1) Try hotlog first
+        recent = await self.hotlog.recent(
+            self.timeline_id,
+            kinds=None,
+            limit=1,
+            # limit=self.hot_limit,
+        )
+        by_id: dict[str, Event] = {e.event_id: e for e in recent if e.event_id in ids}
+
+        # 2) Fallback to persistence for misses
+        missing_ids = [eid for eid in ids if eid not in by_id]
+        if missing_ids and hasattr(self.persistence, "get_events_by_ids"):
+            persisted = await self.persistence.get_events_by_ids(self.timeline_id, missing_ids)
+            for e in persisted:
+                by_id[e.event_id] = e
+
+        # 3) Build results
+        results: list[EventSearchResult] = []
+        for item in event_items:
+            evt = by_id.get(item.item_id)
+            results.append(EventSearchResult(item=item, event=evt))
+        return results
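
A short usage sketch of the new retrieval helpers, assuming `memory` is a MemoryFacade that mixes in RetrievalMixin and that `scored_items` is a list of ScoredItem hits produced by a search backend elsewhere in the service (how that search is issued is outside this hunk):

async def show_hits(memory, scored_items) -> None:
    # Join search hits back to full Event objects (hotlog first, persistence
    # as fallback), per fetch_events_for_search_results above.
    results = await memory.fetch_events_for_search_results(scored_items, corpus="event")
    for r in results:
        # r.score proxies the underlying ScoredItem score; r.event is None when
        # the event could not be found in either store.
        label = r.event.kind if r.event else "<event not found>"
        print(f"{r.score:.3f}  {label}")

    # Tag-filtered recent events; recent_events overfetches from the hotlog so
    # the tag filter can still return up to `limit` matches.
    recent_summaries = await memory.recent_events(tags=["summary"], limit=10)
    print(f"{len(recent_summaries)} recent summary events")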
aethergraph/services/memory/factory.py

@@ -6,6 +6,7 @@ from typing import Any
 from aethergraph.contracts.services.artifacts import AsyncArtifactStore  # generic protocol
 from aethergraph.contracts.services.memory import HotLog, Indices, Persistence
 from aethergraph.contracts.storage.doc_store import DocStore
+from aethergraph.services.indices.scoped_indices import ScopedIndices
 from aethergraph.services.memory.facade import MemoryFacade
 from aethergraph.services.scope.scope import Scope
 
@@ -63,6 +64,7 @@ class MemoryFactory:
         node_id: str | None = None,
         session_id: str | None = None,
         scope: Scope | None = None,
+        scoped_indices: ScopedIndices | None = None,
     ) -> MemoryFacade:
         return MemoryFacade(
             run_id=run_id,
@@ -72,7 +74,8 @@ class MemoryFactory:
             scope=scope,
             hotlog=self.hotlog,
             persistence=self.persistence,
-            indices=self.indices,
+            mem_indices=self.indices,
+            scoped_indices=scoped_indices,
             docs=self.docs,
             artifact_store=self.artifacts,
             hot_limit=self.hot_limit,
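
The factory change renames the facade's global index argument to `mem_indices` and threads through an optional per-scope ScopedIndices bundle. A hedged wiring sketch using only the keyword names visible in the hunk; every service object is assumed to be constructed elsewhere, and MemoryFacade may require additional arguments not shown in this diff:

from aethergraph.services.memory.facade import MemoryFacade

def wire_facade(run_id, scope, hotlog, persistence, global_indices,
                scoped_indices, docs, artifacts, hot_limit):
    # Keyword names mirror the hunk above; this is a sketch, not the factory's code.
    return MemoryFacade(
        run_id=run_id,
        scope=scope,
        hotlog=hotlog,
        persistence=persistence,
        mem_indices=global_indices,      # renamed from `indices=` in 0.1.0a3
        scoped_indices=scoped_indices,   # new in 0.1.0a4, may be None
        docs=docs,
        artifact_store=artifacts,
        hot_limit=hot_limit,
    )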
aethergraph/services/planning/__init__.py (File without changes)
aethergraph/services/planning/action_catalog.py (new file)

@@ -0,0 +1,271 @@
+# aethergraph/services/planning/action_catalog.py
+from __future__ import annotations
+
+from collections.abc import Iterable, Iterator
+from dataclasses import dataclass
+from typing import TYPE_CHECKING, Literal
+
+from aethergraph.core.graph.action_spec import ActionSpec, IOSlot
+from aethergraph.services.planning.graph_io_adapter import graph_io_to_slots
+from aethergraph.services.registry.registry_key import Key
+from aethergraph.services.registry.unified_registry import UnifiedRegistry
+
+if TYPE_CHECKING:
+    from aethergraph.core.graph.graph_fn import GraphFunction
+
+
+@dataclass
+class ActionCatalog:
+    registry: UnifiedRegistry
+
+    # --- builders ---------------------------------------------------------
+
+    def _build_graphfn_spec(self, name: str, version: str | None = None) -> ActionSpec:
+        gf: GraphFunction = self.registry.get_graphfn(name, version=version)
+        meta = self.registry.get_meta("graphfn", name, version=version) or {}
+        io = gf.io_signature()
+
+        flow_id = meta.get("flow_id", None)
+        tags = meta.get("tags", []) or []
+        description = meta.get("description", None)
+
+        # io_signature from GraphFunction are already IOSlot
+        inputs: list[IOSlot] = io.get("inputs", [])
+        outputs: list[IOSlot] = io.get("outputs", [])
+
+        # resolve final version used
+        latest_version = self.registry.list_graphfns().get(
+            f"graphfn:{name}",
+            version or "0.0.0",
+        )
+
+        return ActionSpec(
+            name=name,
+            ref=Key(nspace="graphfn", name=name, version=latest_version).canonical(),
+            kind="graphfn",
+            version=latest_version,
+            inputs=inputs,
+            outputs=outputs,
+            description=description,
+            tags=tags,
+            flow_id=flow_id,
+        )
+
+    def _build_graph_spec(self, name: str, version: str | None = None) -> ActionSpec:
+        g = self.registry.get_graph(name, version=version)
+        meta = self.registry.get_meta("graph", name, version=version) or {}
+
+        flow_id = meta.get("flow_id")
+        tags = meta.get("tags") or []
+        description = meta.get("description") or name
+
+        # pass meta to adapter so it can use io_types
+        io_slots = graph_io_to_slots(g, meta=meta)
+        inputs = io_slots["inputs"]
+        outputs = io_slots["outputs"]
+
+        latest_version = self.registry.list_graphs().get(
+            f"graph:{name}",
+            version or "0.0.0",
+        )
+
+        return ActionSpec(
+            name=name,
+            ref=Key(nspace="graph", name=name, version=latest_version).canonical(),
+            kind="graph",
+            version=latest_version,
+            description=description,
+            tags=list(tags),
+            flow_id=flow_id,
+            inputs=inputs,
+            outputs=outputs,
+        )
+
+    # --- listing / filtering ---------------------------------------------
+
+    def _flow_filter(
+        self,
+        spec: ActionSpec,
+        *,
+        flow_ids: list[str] | None,
+        include_global: bool,
+    ) -> bool:
+        """
+        Decide whether to include this spec given flow_ids and include_global.
+        - spec.flow_id is a single string or None.
+        - flow_ids is the set of flows we care about, or None for 'no filtering'.
+        """
+        if flow_ids is None:
+            # no restriction → include everything
+            return True
+
+        if spec.flow_id in flow_ids:
+            return True
+
+        # allow "global" actions when requested
+        if include_global and spec.flow_id is None:  # noqa: SIM103
+            return True
+
+        return False
+
+    def list_actions(
+        self,
+        *,
+        flow_ids: list[str] | None = None,
+        kinds: Iterable[Literal["graph", "graphfn"]] | None = ("graph", "graphfn"),
+        include_global: bool = True,
+    ) -> list[ActionSpec]:
+        """
+        Return all ActionSpecs, optionally filtered by:
+        - kinds (graph vs graphfn)
+        - flow_ids (one or more flow ids)
+        - include_global: if True, also include actions with flow_id=None
+        """
+        specs: list[ActionSpec] = []
+
+        if kinds is None:
+            kinds = ("graph", "graphfn")
+
+        if "graphfn" in kinds:
+            for key, ver in self.registry.list_graphfns().items():
+                _, name = key.split(":", 1)
+                spec = self._build_graphfn_spec(name, version=ver)
+                if not self._flow_filter(spec, flow_ids=flow_ids, include_global=include_global):
+                    continue
+                specs.append(spec)
+
+        if "graph" in kinds:
+            for key, ver in self.registry.list_graphs().items():
+                _, name = key.split(":", 1)
+                spec = self._build_graph_spec(name, version=ver)
+                if not self._flow_filter(spec, flow_ids=flow_ids, include_global=include_global):
+                    continue
+                specs.append(spec)
+
+        # stable order
+        specs.sort(key=lambda s: (s.flow_id or "", s.name, s.version))
+        return specs
+
+    def iter_actions(
+        self,
+        *,
+        flow_ids: list[str] | None = None,
+        kinds: Iterable[Literal["graph", "graphfn"]] | None = ("graph", "graphfn"),
+        include_global: bool = True,
+    ) -> Iterator[ActionSpec]:
+        for spec in self.list_actions(  # noqa: UP028
+            flow_ids=flow_ids,
+            kinds=kinds,
+            include_global=include_global,
+        ):
+            yield spec
+
+    # --- lookups ---------------------------------------------------------
+
+    def get_action(self, ref: str) -> ActionSpec | None:
+        kind, rest = ref.split(":", 1)
+        name, sep, version = rest.partition("@")
+        version = version or None
+
+        if kind == "graphfn":
+            return self._build_graphfn_spec(name, version=version)
+        if kind == "graph":
+            return self._build_graph_spec(name, version=version)
+        raise ValueError(f"Unknown action kind in ref: {ref}")
+
+    def get_action_by_name(
+        self,
+        name: str,
+        *,
+        kind: Literal["graph", "graphfn"] | None = None,
+        flow_ids: list[str] | None = None,
+        include_global: bool = True,
+    ) -> ActionSpec | None:
+        """
+        Convenience lookup: find an ActionSpec by its logical name.
+        """
+        if kind is None:
+            kinds: Iterable[Literal["graph", "graphfn"]] = ("graph", "graphfn")
+        else:
+            kinds = (kind,)
+
+        for spec in self.list_actions(
+            flow_ids=flow_ids,
+            kinds=kinds,
+            include_global=include_global,
+        ):
+            if spec.name == name:
+                return spec
+        return None
+
+    # --- LLM-facing renderers -------------------------------------------
+
+    def to_llm_prompt(
+        self,
+        *,
+        flow_ids: list[str] | None = None,
+        kinds: Iterable[Literal["graph", "graphfn"]] | None = ("graph", "graphfn"),
+        include_global: bool = True,
+    ) -> str:
+        actions = self.list_actions(
+            flow_ids=flow_ids,
+            kinds=kinds,
+            include_global=include_global,
+        )
+        lines: list[str] = []
+        for a in actions:
+            lines.append(f"- {a.name} ({a.kind})")
+            lines.append(f"  ref: {a.ref}")
+            lines.append(f"  description: {a.description}")
+            if a.tags:
+                lines.append(f"  tags: {', '.join(a.tags)}")
+            if a.inputs:
+                lines.append("  inputs:")
+                for inp in a.inputs:
+                    t = inp.type or "any"
+                    req = "required" if inp.required else f"optional (default={inp.default!r})"
+                    lines.append(f"    - {inp.name}: {t}, {req}")
+            if a.outputs:
+                lines.append("  outputs:")
+                for out in a.outputs:
+                    t = out.type or "any"
+                    lines.append(f"    - {out.name}: {t}")
+            lines.append("")
+        return "\n".join(lines)
+
+    def pretty_print(
+        self,
+        *,
+        flow_ids: list[str] | None = None,
+        kinds: Iterable[Literal["graph", "graphfn"]] | None = ("graph", "graphfn"),
+        include_global: bool = True,
+    ) -> str:
+        """
+        Human-readable table for planner prompts.
+
+        Emphasizes the short action name to be used in the \"action\" field
+        of the plan JSON. The internal ref is shown only as secondary info.
+        """
+        actions = self.list_actions(
+            flow_ids=flow_ids,
+            kinds=kinds,
+            include_global=include_global,
+        )
+        lines: list[str] = []
+
+        for a in actions:
+            inputs = ", ".join(f"{s.name}:{s.type or 'any'}" for s in a.inputs)
+            outputs = ", ".join(f"{s.name}:{s.type or 'any'}" for s in a.outputs)
+            tag_str = ", ".join(a.tags or [])
+
+            lines.append(f"- action: {a.name} [{a.kind}]")
+            lines.append(f"  description: {a.description or '-'}")
+            lines.append(f"  inputs: {inputs or 'none'}")
+            lines.append(f"  outputs: {outputs or 'none'}")
+            if tag_str:
+                lines.append(f"  tags: {tag_str}")
+            # keep ref but label as internal; this discourages the LLM from using it
+            lines.append(f"  internal_ref: {a.ref}")
+            lines.append("")  # blank line between actions
+
+        return "\n".join(lines)
aethergraph/services/planning/bindings.py (new file)

@@ -0,0 +1,56 @@
+# aethergraph/services/planning/bindings.py
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import Any, Literal
+
+
+@dataclass
+class InputBinding:
+    kind: Literal["literal", "external", "step_output"]
+    value: Any
+    source_step_id: str | None = None
+    source_output_name: str | None = None
+    external_key: str | None = None
+
+
+def parse_binding(raw: Any) -> InputBinding:
+    """
+    Parses a raw binding representation into an InputBinding object.
+
+    Rules:
+    - If `raw` is not a string, it is treated as a literal value. (which may include numbers, booleans, lists, dicts, etc.)
+    - If `raw` is a string in the format `${user.key}`, it is treated as an external binding.
+    - If `raw` is a string in the format `${step_id.output_name}`, it is treated as a step output binding.
+
+    Args:
+        raw (Any): The raw binding representation, which can be a literal value or a dict specifying the binding type.
+
+    Returns:
+        InputBinding: The parsed InputBinding object.
+    """
+    if not isinstance(raw, str):
+        return InputBinding(kind="literal", value=raw)
+
+    if raw.startswith("${") and raw.endswith("}"):
+        inner = raw[2:-1].strip()
+        if inner.startswith("user."):
+            key = inner.split(".", 1)[1]
+            return InputBinding(kind="external", value=None, external_key=key)
+
+        # step_id.output_name
+        parts = inner.split(".", 1)
+        if len(parts) == 2:
+            step_id, output_name = parts
+            return InputBinding(
+                kind="step_output",
+                value=None,
+                source_step_id=step_id,
+                source_output_name=output_name,
+            )
+
+        # Fallback to literal if unrecognized
+        return InputBinding(kind="literal", value=raw)
+
+    return InputBinding(kind="literal", value=raw)
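
The `${...}` grammar handled by parse_binding maps directly onto the three binding kinds; a small usage sketch of the module as shipped in this wheel (the key and step names below are illustrative):

from aethergraph.services.planning.bindings import parse_binding

# Non-strings (and unrecognized strings) pass through as literals.
assert parse_binding(42).kind == "literal"
assert parse_binding({"path": "a.txt"}).kind == "literal"
assert parse_binding("plain text").kind == "literal"

# "${user.<key>}" becomes an external binding resolved from user-supplied inputs.
ext = parse_binding("${user.query}")
assert ext.kind == "external" and ext.external_key == "query"

# "${<step_id>.<output_name>}" references a prior plan step's output.
dep = parse_binding("${step_1.result}")
assert dep.kind == "step_output"
assert dep.source_step_id == "step_1" and dep.source_output_name == "result"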