ovos-agentic-loop 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,16 @@
1
+ # Licensed under the Apache License, Version 2.0 (the "License");
2
+ # you may not use this file except in compliance with the License.
3
+ # You may obtain a copy of the License at
4
+ #
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ #
7
+ # Unless required by applicable law or agreed to in writing, software
8
+ # distributed under the License is distributed on an "AS IS" BASIS,
9
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10
+ # See the License for the specific language governing permissions and
11
+ # limitations under the License.
12
+
13
+ """ovos-agentic-loop — AgenticLoopEngine base and ReAct implementation."""
14
+ from ovos_agentic_loop.version import __version__
15
+
16
+ __all__ = ["__version__"]
@@ -0,0 +1,143 @@
1
+ # Licensed under the Apache License, Version 2.0 (the "License");
2
+ # you may not use this file except in compliance with the License.
3
+ # You may obtain a copy of the License at
4
+ #
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ #
7
+ # Unless required by applicable law or agreed to in writing, software
8
+ # distributed under the License is distributed on an "AS IS" BASIS,
9
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10
+ # See the License for the specific language governing permissions and
11
+ # limitations under the License.
12
+
13
+ """AgenticLoopEngine — base class for agent-loop ChatEngine plugins."""
14
+ import abc
15
+ from typing import TYPE_CHECKING, Any, Dict, List, Optional
16
+
17
+ from ovos_plugin_manager.templates.agents import AgentMessage, ChatEngine
18
+ from ovos_utils.log import LOG
19
+
20
+ if TYPE_CHECKING:
21
+ from ovos_plugin_manager.templates.agent_tools import ToolBox
22
+
23
+
24
class AgenticLoopEngine(ChatEngine):
    """
    A ``ChatEngine`` subclass for plugins that implement an internal agent loop
    (e.g. ReAct, tool-call/observe cycles, background worker agents).

    From the perspective of a ``PersonaService`` or any caller, an
    ``AgenticLoopEngine`` is identical to a ``ChatEngine`` — it receives a list
    of ``AgentMessage`` objects and returns one. All loop mechanics, tool
    dispatch, retries, and background tasks are implementation details hidden
    inside the plugin.

    The ``toolboxes`` attribute gives persona configs a standard place to inject
    ``ToolBox`` instances. Plugins are free to discover and load additional
    toolboxes internally via ``_load_toolboxes_from_config``.

    Entry point group: ``opm.agents.chat``
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None) -> None:
        """
        Initialise the engine and optionally load toolboxes from config.

        Args:
            config: Plugin-specific configuration dictionary. May contain a
                ``"toolboxes"`` key listing toolbox plugin IDs to load.
        """
        super().__init__(config=config)
        # Toolboxes available to the agent loop; populated here from config
        # and/or later via load_toolboxes().
        self.toolboxes: "List[ToolBox]" = []
        self._load_toolboxes_from_config()

    def load_toolboxes(self, toolboxes: List[Any]) -> None:
        """
        Register a list of ``ToolBox`` instances with this engine.

        Replaces any previously registered toolboxes. If the engine already
        has a ``brain`` wired in, it is propagated to toolboxes that expose a
        ``set_brain`` method (e.g. ``SkillMDToolBox``).

        Args:
            toolboxes: Instantiated ``ToolBox`` objects to make available to
                the agent loop.
        """
        self.toolboxes = list(toolboxes)
        # If a brain was set before the toolboxes arrived, propagate it now so
        # that wiring order does not matter.
        brain = getattr(self, "_brain", None)
        if brain is not None:
            self._inject_brain_into_toolboxes(brain)

    def _inject_brain_into_toolboxes(self, brain: Any) -> None:
        """
        Propagate a brain ``ChatEngine`` to toolboxes that require one.

        Calls ``toolbox.set_brain(brain)`` on every registered toolbox that
        exposes a ``set_brain`` method (duck-type check). This ensures that
        toolboxes like ``SkillMDToolBox`` — which use an inner LLM for
        tool-call routing — receive the brain automatically when it is set on
        the engine, regardless of load order.

        Args:
            brain: The ``ChatEngine`` instance to propagate.
        """
        for tb in self.toolboxes:
            if callable(getattr(tb, "set_brain", None)):
                try:
                    tb.set_brain(brain)
                except Exception as exc:  # noqa: BLE001
                    # One broken toolbox must not block injection into the rest.
                    LOG.warning(
                        f"AgenticLoopEngine: failed to inject brain into "
                        f"{type(tb).__name__}: {exc}"
                    )

    def _load_toolboxes_from_config(self) -> None:
        """
        Discover and instantiate toolboxes declared in ``config["toolboxes"]``.

        Reads a list of toolbox plugin IDs from the plugin config, calls OPM to
        find matching ``ToolBox`` plugins, and populates ``self.toolboxes``.
        Brain injection is deferred — toolboxes are wired after the engine's
        ``_brain`` attribute is set (see ``_inject_brain_into_toolboxes``).
        Does nothing if the config key is absent or OPM is unavailable.
        """
        # Guard against a None config (consistent with AgentsMDContextManager,
        # which also treats config defensively).
        cfg: Dict[str, Any] = self.config or {}
        toolbox_ids: List[str] = cfg.get("toolboxes", [])
        if not toolbox_ids:
            return
        try:
            from ovos_plugin_manager.agent_tools import load_toolbox_plugin
        except ImportError:
            LOG.debug("AgenticLoopEngine: ovos_plugin_manager.agent_tools not available; "
                      "skipping toolbox auto-load")
            return

        for tid in toolbox_ids:
            try:
                # Per-toolbox config lives under a key matching the plugin ID.
                plugin = load_toolbox_plugin(tid, config=cfg.get(tid, {}))
                if plugin is not None:
                    self.toolboxes.append(plugin)
            except Exception as exc:  # noqa: BLE001
                # Best effort: a single failing toolbox must not abort the rest.
                LOG.warning(f"AgenticLoopEngine: failed to load toolbox '{tid}': {exc}")

    @abc.abstractmethod
    def continue_chat(self, messages: List[AgentMessage],
                      session_id: str = "default",
                      lang: Optional[str] = None,
                      units: Optional[str] = None) -> AgentMessage:
        """
        Run the agent loop and return the final response.

        The implementation is responsible for all internal steps: tool
        selection, execution, observation, and iteration. The caller always
        receives a single ``AgentMessage`` with ``MessageRole.ASSISTANT``.

        Args:
            messages: Full conversation history including the latest user turn.
            session_id: Conversation session identifier.
            lang: BCP-47 language code.
            units: Preferred measurement system (``"metric"`` / ``"imperial"``).

        Returns:
            The assistant's final response after the loop has completed.
        """
        raise NotImplementedError()
@@ -0,0 +1,182 @@
1
+ # Copyright 2025, OpenVoiceOS
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """ChainOfThoughtEngine — structured step-by-step reasoning without tool calls.
15
+
16
+ Based on "Chain-of-Thought Prompting Elicits Reasoning in Large Language Models"
17
+ (Wei et al., 2022 — https://arxiv.org/abs/2201.11903) and the follow-up
18
+ zero-shot variant "Large Language Models are Zero-Shot Reasoners" (Kojima et al.,
19
+ 2022 — https://arxiv.org/abs/2205.11916).
20
+
21
+ Algorithm
22
+ ---------
23
+ A single LLM call is made with a system prompt that instructs the model to
24
+ reason step by step before giving its final answer. The final answer is
25
+ extracted from a ``FINAL ANSWER:`` marker; if absent the full response is
26
+ returned.
27
+
28
+ This is the **simplest possible agent loop** — one LLM call, no tools, no
29
+ iteration. It is the recommended baseline for reasoning-heavy tasks that do
30
+ not require external information, and it is the inner building block used by
31
+ all more complex engines.
32
+
33
+ Key differences from ReAct
34
+ --------------------------
35
+ - No tools, no observation loop.
36
+ - Single LLM call — fastest and cheapest.
37
+ - Produces human-readable reasoning traces naturally.
38
+ - Best for arithmetic, logic, common-sense reasoning, multi-step instructions.
39
+ """
40
+ from typing import Any, Dict, List, Optional
41
+
42
+ from ovos_plugin_manager.templates.agents import AgentMessage, ChatEngine, MessageRole
43
+
44
+ from ovos_agentic_loop.base import AgenticLoopEngine
45
+
46
+ _COT_SYSTEM_PROMPT = """\
47
+ Think through the problem step by step before giving your final answer.
48
+
49
+ Format your response as:
50
+ Step 1: <reasoning>
51
+ Step 2: <reasoning>
52
+ ...
53
+ FINAL ANSWER: <concise answer>
54
+
55
+ Rules:
56
+ - Work through every relevant step explicitly.
57
+ - Do not skip steps.
58
+ - Put ONLY the final answer after "FINAL ANSWER:" — no extra reasoning.
59
+ """
60
+
61
+ _FINAL_ANSWER_MARKER = "FINAL ANSWER:"
62
+
63
+
64
+ def _extract_final_answer(text: str) -> Optional[str]:
65
+ """
66
+ Extract the text following ``FINAL ANSWER:`` in LLM output.
67
+
68
+ Args:
69
+ text: Raw LLM output.
70
+
71
+ Returns:
72
+ The answer string, or ``None`` if the marker is absent.
73
+ """
74
+ idx = text.upper().find(_FINAL_ANSWER_MARKER.upper())
75
+ if idx == -1:
76
+ return None
77
+ return text[idx + len(_FINAL_ANSWER_MARKER):].strip()
78
+
79
+
80
class ChainOfThoughtEngine(AgenticLoopEngine):
    """
    ``AgenticLoopEngine`` implementing zero-shot Chain-of-Thought prompting.

    Every request is prefixed with a "think step by step" system instruction,
    and the ``FINAL ANSWER:`` segment is extracted from the structured reply.
    There is no tool use and no iteration — exactly one LLM call per
    ``continue_chat`` invocation, which makes this the recommended baseline
    for reasoning tasks (arithmetic, logic, multi-step instructions) that do
    not require external information. Registered toolboxes are ignored.

    Config keys:

    - ``brain`` (str): ChatEngine plugin ID used as the inner LLM.
    - ``system_prompt`` (str): Optional extra system context prepended before
      the CoT instruction.

    Entry point group: ``opm.agents.chat``
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None) -> None:
        """
        Initialise the Chain-of-Thought engine.

        Args:
            config: Plugin configuration dict.
        """
        super().__init__(config=config)
        # Inner LLM; resolved lazily by the ``brain`` property.
        self._brain: Optional[ChatEngine] = None

    @property
    def brain(self) -> Optional[ChatEngine]:
        """The inner ChatEngine used for the single LLM call."""
        if self._brain is None:
            self._brain = self._load_brain()
        return self._brain

    def set_brain(self, brain: ChatEngine) -> None:
        """
        Inject a ChatEngine instance as the inner LLM.

        Args:
            brain: Instantiated ``ChatEngine``.
        """
        self._brain = brain
        self._inject_brain_into_toolboxes(brain)

    def _load_brain(self) -> Optional[ChatEngine]:
        """
        Load the brain ChatEngine from config using OPM.

        Returns:
            Instantiated ``ChatEngine``, or ``None`` if loading fails.
        """
        plugin_id: str = self.config.get("brain", "")
        if not plugin_id:
            return None
        try:
            from ovos_plugin_manager.agents import load_chat_plugin
            return load_chat_plugin(plugin_id, config=self.config.get(plugin_id, {}))
        except Exception:  # noqa: BLE001
            # Best effort: an unloadable brain is reported by continue_chat.
            return None

    def continue_chat(self, messages: List[AgentMessage],
                      session_id: str = "default",
                      lang: Optional[str] = None,
                      units: Optional[str] = None) -> AgentMessage:
        """
        Run a single CoT-prompted LLM call and return the final answer.

        Prepends the CoT system instruction (and any ``system_prompt`` config
        value) to the message list, calls the brain once, and extracts the
        ``FINAL ANSWER:`` text. If the marker is absent the full response is
        returned as-is.

        Args:
            messages: Conversation history including the latest user turn.
            session_id: Session identifier forwarded to the brain.
            lang: BCP-47 language code forwarded to the brain.
            units: Measurement system forwarded to the brain.

        Returns:
            ``AgentMessage`` with ``MessageRole.ASSISTANT`` containing the
            extracted final answer or the full CoT response.
        """
        engine = self.brain
        if engine is None:
            return AgentMessage(role=MessageRole.ASSISTANT,
                                content="Error: no brain configured.")

        prefix = self.config.get("system_prompt", "")
        preamble = _COT_SYSTEM_PROMPT if not prefix else f"{prefix}\n\n{_COT_SYSTEM_PROMPT}"

        convo: List[AgentMessage] = [AgentMessage(role=MessageRole.SYSTEM, content=preamble)]
        convo.extend(messages)

        reply = engine.continue_chat(convo, session_id=session_id,
                                     lang=lang, units=units)
        answer = _extract_final_answer(reply.content)
        if answer is None:
            answer = reply.content
        return AgentMessage(role=MessageRole.ASSISTANT, content=answer)
@@ -0,0 +1,13 @@
1
+ # Licensed under the Apache License, Version 2.0 (the "License");
2
+ # you may not use this file except in compliance with the License.
3
+ # You may obtain a copy of the License at
4
+ #
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ #
7
+ # Unless required by applicable law or agreed to in writing, software
8
+ # distributed under the License is distributed on an "AS IS" BASIS,
9
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10
+ # See the License for the specific language governing permissions and
11
+ # limitations under the License.
12
+
13
+ """AGENTS.md context manager integration."""
@@ -0,0 +1,274 @@
1
+ # Licensed under the Apache License, Version 2.0 (the "License");
2
+ # you may not use this file except in compliance with the License.
3
+ # You may obtain a copy of the License at
4
+ #
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ #
7
+ # Unless required by applicable law or agreed to in writing, software
8
+ # distributed under the License is distributed on an "AS IS" BASIS,
9
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10
+ # See the License for the specific language governing permissions and
11
+ # limitations under the License.
12
+
13
+ """AgentsMDContextManager — builds agent system prompts from AGENTS.md files."""
14
+ import importlib.metadata
15
+ import os
16
+ import re
17
+ from typing import Dict, List, Optional
18
+
19
+ from ovos_plugin_manager.templates.agents import AgentContextManager, AgentMessage, MessageRole
20
+ from ovos_utils.log import LOG
21
+
22
+
23
+ def _parse_sections(text: str) -> Dict[str, str]:
24
+ """
25
+ Parse a markdown document into a dict mapping heading titles to section bodies.
26
+
27
+ Headings at any level (``#``, ``##``, etc.) are recognised. The section
28
+ body is everything from the heading line up to (but not including) the next
29
+ heading of the same or higher level.
30
+
31
+ Args:
32
+ text: Full markdown document text.
33
+
34
+ Returns:
35
+ ``{heading_title: section_body_text}`` — headings are stripped of
36
+ leading ``#`` characters and whitespace.
37
+ """
38
+ sections: Dict[str, str] = {}
39
+ pattern = re.compile(r"^(#{1,6})\s+(.+)$", re.MULTILINE)
40
+ matches = list(pattern.finditer(text))
41
+
42
+ for i, match in enumerate(matches):
43
+ heading = match.group(2).strip()
44
+ start = match.end()
45
+ end = matches[i + 1].start() if i + 1 < len(matches) else len(text)
46
+ sections[heading] = text[start:end].strip()
47
+
48
+ return sections
49
+
50
+
51
def _discover_agents_md_paths() -> List[str]:
    """
    Discover AGENTS.md files shipped as installed package data.

    Scans the recorded file manifest of every installed distribution for
    entries named ``AGENTS.md`` and keeps those that resolve to real files.

    Returns:
        List of absolute paths that exist on disk.
    """
    found: List[str] = []
    try:
        dists = importlib.metadata.distributions()
    except Exception as exc:  # noqa: BLE001
        LOG.debug(f"AgentsMDContextManager: could not enumerate distributions: {exc}")
        return found

    for dist in dists:
        try:
            for entry in (dist.files or []):
                if entry.name != "AGENTS.md":
                    continue
                resolved = str(entry.locate().resolve())
                if os.path.isfile(resolved):
                    found.append(resolved)
        except Exception as exc:  # noqa: BLE001
            # A single malformed distribution must not abort discovery.
            LOG.debug(f"AgentsMDContextManager: error scanning dist files: {exc}")
    return found
80
+
81
+
82
class AgentsMDContextManager(AgentContextManager):
    """
    An ``AgentContextManager`` that assembles the agent system prompt by
    loading selected sections from one or more ``AGENTS.md`` files.

    The same AGENTS.md that governs Claude Code at dev-time can govern a
    runtime LLM agent — both consumers read the same document.

    Per-session conversation history is stored in an in-memory dict keyed by
    ``session_id``.

    Discovery:

    1. Config ``agents_md_sources`` — list of paths or ``["auto"]`` to trigger
       automatic package-data discovery.
    2. ``extra_paths`` constructor argument — additional ad-hoc paths.

    Config keys:

    - ``agents_md_sources`` (List[str]): Paths to load, or ``["auto"]``.
    - ``include_sections`` (List[str]): Heading titles to include (substring
      match). If empty, all sections are included.
    - ``system_prompt_prefix`` (str): Text prepended before the assembled
      AGENTS.md content.

    Entry point group: ``opm.agents.memory``
    """

    plugin_id = "ovos-agents-md-context-plugin"

    def __init__(self, config: Optional[Dict] = None,
                 extra_paths: Optional[List[str]] = None) -> None:
        """
        Initialise the context manager.

        Args:
            config: Plugin configuration dictionary.
            extra_paths: Additional AGENTS.md file paths to load.
        """
        super().__init__(config=config)
        self._extra_paths: List[str] = list(extra_paths or [])
        # Per-session history: {session_id: [AgentMessage, ...]}
        self._sessions: Dict[str, List[AgentMessage]] = {}
        # Built lazily by the ``system_prompt`` property.
        self._system_prompt_cache: Optional[str] = None

    # ------------------------------------------------------------------
    # Configuration helpers
    # ------------------------------------------------------------------

    @property
    def include_sections(self) -> List[str]:
        """Heading substrings that filter which sections are included."""
        cfg = self.config or {}
        return list(cfg.get("include_sections", []))

    @property
    def system_prompt_prefix(self) -> str:
        """Text prepended before assembled AGENTS.md content."""
        cfg = self.config or {}
        return str(cfg.get("system_prompt_prefix", ""))

    # ------------------------------------------------------------------
    # Path resolution
    # ------------------------------------------------------------------

    def _resolve_paths(self) -> List[str]:
        """
        Return deduplicated list of AGENTS.md paths to load.

        Returns:
            Absolute path list.
        """
        cfg = self.config or {}
        sources: List[str] = list(cfg.get("agents_md_sources", ["auto"]))

        candidates: List[str] = []
        if "auto" in sources:
            candidates += _discover_agents_md_paths()
        candidates += [src for src in sources
                       if src != "auto" and os.path.isfile(src)]
        candidates += self._extra_paths

        # dict.fromkeys deduplicates while preserving first-seen order.
        return list(dict.fromkeys(candidates))

    # ------------------------------------------------------------------
    # System prompt construction
    # ------------------------------------------------------------------

    def _build_system_prompt(self) -> str:
        """
        Load all AGENTS.md files, filter sections, and concatenate.

        Returns:
            The assembled system prompt string.
        """
        wanted = self.include_sections
        chunks: List[str] = []
        prefix = self.system_prompt_prefix
        if prefix:
            chunks.append(prefix)

        for path in self._resolve_paths():
            try:
                with open(path, encoding="utf-8") as fh:
                    text = fh.read()
            except OSError as exc:
                LOG.warning(f"AgentsMDContextManager: could not read '{path}': {exc}")
                continue

            for heading, body in _parse_sections(text).items():
                # Empty filter list means "include everything".
                keep = (not wanted or
                        any(term.lower() in heading.lower() for term in wanted))
                if keep:
                    chunks.append(f"## {heading}\n\n{body}")

        return "\n\n".join(chunks)

    @property
    def system_prompt(self) -> str:
        """
        Assembled system prompt derived from AGENTS.md sections.

        Lazily computed and cached after the first access.
        """
        if self._system_prompt_cache is None:
            self._system_prompt_cache = self._build_system_prompt()
        return self._system_prompt_cache

    def invalidate_cache(self) -> None:
        """Force the system prompt to be rebuilt on next access."""
        self._system_prompt_cache = None

    # ------------------------------------------------------------------
    # AgentContextManager protocol (full ABC compliance)
    # ------------------------------------------------------------------

    def get_history(self, session_id: str) -> List[AgentMessage]:
        """
        Retrieve the message history for a given session.

        Args:
            session_id: Conversation session identifier.

        Returns:
            Snapshot of the session's message list in chronological order.
        """
        return list(self._sessions.get(session_id, []))

    def update_history(self, new_messages: List[AgentMessage], session_id: str) -> None:
        """
        Append new messages to a session's history.

        Args:
            new_messages: Messages to append.
            session_id: Conversation session identifier.
        """
        self._sessions.setdefault(session_id, []).extend(new_messages)

    def build_conversation_context(
        self,
        utterance: str,
        session_id: str,
    ) -> List[AgentMessage]:
        """
        Assemble the full message list for an LLM call.

        Prepends the system prompt (assembled from AGENTS.md), appends the
        session's conversation history, and adds the current user utterance
        as the final message.

        Args:
            utterance: The user's current input.
            session_id: Conversation session identifier used to retrieve history.

        Returns:
            Ordered list of ``AgentMessage`` objects ready for a ChatEngine.
        """
        context: List[AgentMessage] = []

        prompt = self.system_prompt
        if prompt:
            context.append(AgentMessage(role=MessageRole.SYSTEM, content=prompt))

        context += self.get_history(session_id)
        context.append(AgentMessage(role=MessageRole.USER, content=utterance))
        return context