mycode-sdk 0.4.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
mycode/__init__.py ADDED
@@ -0,0 +1,72 @@
1
"""mycode — multi-turn tool-calling agent runtime.

Public API for embedding the agent loop in other Python applications. The
runtime ships four built-in coding tools (``read``, ``write``, ``edit``,
``bash``) exposed as :data:`read_tool`, :data:`write_tool`, :data:`edit_tool`,
:data:`bash_tool` — pick the ones you want via ``tools=[...]`` rather than
silently exposing file system and shell access.
"""

from mycode.agent import Agent, Event, PersistCallback
from mycode.messages import (
    ContentBlock,
    ConversationMessage,
    assistant_message,
    build_message,
    document_block,
    flatten_message_text,
    image_block,
    text_block,
    thinking_block,
    tool_result_block,
    tool_use_block,
    user_text_message,
)
from mycode.session import SessionStore
from mycode.tools import (
    DEFAULT_TOOL_SPECS,
    ToolContext,
    ToolExecutionResult,
    ToolExecutor,
    ToolSpec,
    cancel_all_tools,
    tool,
)

# Keep in sync with the distribution version (the published wheel is 0.4.2;
# this string previously lagged behind at 0.4.0).
__version__ = "0.4.2"

# Built-in tool specs exposed as module-level constants so callers can pick
# which ones to register (``tools=[read_tool, bash_tool]``) rather than
# getting all four by default.
read_tool, write_tool, edit_tool, bash_tool = DEFAULT_TOOL_SPECS

__all__ = [
    "Agent",
    "ContentBlock",
    "ConversationMessage",
    "DEFAULT_TOOL_SPECS",
    "Event",
    "PersistCallback",
    "SessionStore",
    "ToolContext",
    "ToolExecutionResult",
    "ToolExecutor",
    "ToolSpec",
    "__version__",
    "assistant_message",
    "bash_tool",
    "build_message",
    "cancel_all_tools",
    "document_block",
    "edit_tool",
    "flatten_message_text",
    "image_block",
    "read_tool",
    "text_block",
    "thinking_block",
    "tool",
    "tool_result_block",
    "tool_use_block",
    "user_text_message",
    "write_tool",
]
mycode/agent.py ADDED
@@ -0,0 +1,616 @@
1
+ """Multi-turn agent loop.
2
+
3
+ :class:`Agent` drives one conversation. Each call to :meth:`Agent.achat`
4
+ runs one user turn and appends every emitted message to the on-disk
5
+ session log.
6
+ """
7
+
8
+ from __future__ import annotations
9
+
10
+ import asyncio
11
+ import logging
12
+ from collections.abc import AsyncIterator, Awaitable, Callable, Sequence
13
+ from dataclasses import dataclass, field
14
+ from pathlib import Path
15
+ from typing import Any, cast
16
+ from uuid import uuid4
17
+
18
+ from mycode.messages import (
19
+ ConversationMessage,
20
+ build_message,
21
+ flatten_message_text,
22
+ tool_result_block,
23
+ user_text_message,
24
+ )
25
+ from mycode.models import infer_provider_from_model, resolve_model_metadata
26
+ from mycode.providers import get_provider_adapter
27
+ from mycode.providers.base import ProviderAdapter, ProviderRequest, ProviderStreamEvent
28
+ from mycode.session import (
29
+ COMPACT_SUMMARY_PROMPT,
30
+ DEFAULT_COMPACT_THRESHOLD,
31
+ SessionStore,
32
+ apply_compact,
33
+ build_compact_event,
34
+ resolve_sessions_dir,
35
+ should_compact,
36
+ )
37
+ from mycode.tools import ToolExecutionResult, ToolExecutor, ToolSpec
38
+
39
logger = logging.getLogger(__name__)

# Async hook invoked with each emitted message immediately before it is
# appended to the on-disk session log (see ``Agent.achat``'s ``on_persist``).
PersistCallback = Callable[[ConversationMessage], Awaitable[None]]
42
+
43
+
44
@dataclass
class Event:
    """Streaming event emitted by :meth:`Agent.achat`.

    ``type`` is a short tag — the loop emits ``"text"``, ``"reasoning"``,
    ``"tool_start"``, ``"tool_output"``, ``"tool_done"``, ``"compact"`` and
    ``"error"`` — and ``data`` carries the type-specific payload.
    """

    # Event tag, e.g. "text", "tool_done", "error".
    type: str
    # Type-specific payload; empty dict when the event carries no data.
    data: dict[str, Any] = field(default_factory=dict)
50
+
51
+
52
+ class Agent:
53
+ """Multi-turn tool-calling agent runtime."""
54
+
55
    def __init__(
        self,
        *,
        model: str,
        cwd: str,
        provider: str | None = None,
        session_id: str | None = None,
        session_dir: Path | None = None,
        api_key: str | None = None,
        api_base: str | None = None,
        messages: list[ConversationMessage] | None = None,
        max_turns: int | None = None,
        max_tokens: int | None = None,
        context_window: int | None = None,
        compact_threshold: float | None = None,
        reasoning_effort: str | None = None,
        supports_reasoning: bool | None = None,
        supports_image_input: bool | None = None,
        supports_pdf_input: bool | None = None,
        system: str = "",
        tools: ToolExecutor | Sequence[ToolSpec] | None = None,
    ):
        """Create an agent bound to one on-disk session.

        Args:
            model: Provider model identifier.
            cwd: Working directory for tools; resolved to an absolute path.
            provider: Provider name; inferred from ``model`` when omitted.
            session_id: Explicit session id; otherwise derived from
                ``session_dir``'s name, or a fresh ``uuid4().hex``.
            session_dir: Session directory; defaults to
                ``resolve_sessions_dir() / session_id``.
            api_key: Provider credential (``None`` = adapter default).
            api_base: Endpoint override (``None`` = adapter default).
            messages: Initial history; when ``None`` the history is loaded
                from the session store.
            max_turns: Cap on provider turns per ``achat`` call
                (``None`` = unlimited).
            max_tokens: Max output tokens override (see
                :meth:`refresh_capabilities`).
            context_window: Context window override.
            compact_threshold: Context-usage fraction that triggers
                compaction; defaults to ``DEFAULT_COMPACT_THRESHOLD``.
            reasoning_effort: Provider-specific reasoning-effort setting.
            supports_reasoning: Capability override.
            supports_image_input: Capability override.
            supports_pdf_input: Capability override.
            system: System prompt text.
            tools: A ready-made :class:`ToolExecutor`, or a sequence of
                :class:`ToolSpec` to register (none by default).

        Raises:
            ValueError: If ``provider`` is omitted and cannot be inferred
                from ``model``.
        """
        self.model = model
        if provider is None:
            inferred = infer_provider_from_model(model)
            if inferred is None:
                raise ValueError(f"could not infer provider for model {model!r}; pass provider= explicitly")
            provider = inferred
        self.provider = provider

        # strict=False: the working directory need not exist yet.
        self.cwd = str(Path(cwd).resolve(strict=False))
        # If only ``session_dir`` is supplied, derive the id from its directory name.
        self.session_id = (session_id or "").strip() or (session_dir.name if session_dir is not None else uuid4().hex)
        self.session_dir = session_dir if session_dir is not None else resolve_sessions_dir() / self.session_id
        # The store roots at the parent so multiple sessions share one data dir.
        self._store = SessionStore(data_dir=self.session_dir.parent)

        self.api_key = api_key
        self.api_base = api_base
        self.max_turns = max_turns
        self.compact_threshold = compact_threshold if compact_threshold is not None else DEFAULT_COMPACT_THRESHOLD
        self.reasoning_effort = reasoning_effort

        self.system = system
        # Set by cancel(); checked throughout the turn loop.
        self._cancel_event = asyncio.Event()
        # In-flight provider read, kept so cancel() can interrupt it.
        self._provider_event_task: asyncio.Future[ProviderStreamEvent] | None = None

        # No explicit history: resume from the persisted session, if any.
        if messages is None:
            data = self._store.load_session_sync(self.session_id)
            messages = list(data["messages"]) if data is not None else []
        self.messages: list[ConversationMessage] = list(messages)

        if isinstance(tools, ToolExecutor):
            self.tools = tools
        else:
            self.tools = ToolExecutor(
                cwd=self.cwd,
                session_dir=self.session_dir,
                tools=list(tools) if tools is not None else [],
                # Placeholder; refresh_capabilities() below sets the real value.
                supports_image_input=False,
            )

        self.refresh_capabilities(
            max_tokens=max_tokens,
            context_window=context_window,
            supports_reasoning=supports_reasoning,
            supports_image_input=supports_image_input,
            supports_pdf_input=supports_pdf_input,
        )
122
+ )
123
+
124
+ def refresh_capabilities(
125
+ self,
126
+ *,
127
+ max_tokens: int | None = None,
128
+ context_window: int | None = None,
129
+ supports_reasoning: bool | None = None,
130
+ supports_image_input: bool | None = None,
131
+ supports_pdf_input: bool | None = None,
132
+ ) -> None:
133
+ """Resolve model capability fields against the metadata catalog.
134
+
135
+ Explicit arguments win; otherwise the bundled catalog supplies the value;
136
+ otherwise a conservative default is used. Call this after mutating
137
+ ``self.provider`` or ``self.model`` to re-derive the capability fields.
138
+ """
139
+
140
+ meta = resolve_model_metadata(
141
+ provider=self.provider,
142
+ model=self.model,
143
+ max_output_tokens=max_tokens,
144
+ context_window=context_window,
145
+ supports_reasoning=supports_reasoning,
146
+ supports_image_input=supports_image_input,
147
+ supports_pdf_input=supports_pdf_input,
148
+ )
149
+ self.max_tokens: int = meta.max_output_tokens or 16_384
150
+ self.context_window: int | None = meta.context_window or 128_000
151
+ self.supports_reasoning: bool | None = meta.supports_reasoning
152
+ self.supports_image_input: bool = bool(meta.supports_image_input)
153
+ self.supports_pdf_input: bool = bool(meta.supports_pdf_input)
154
+ self.tools.supports_image_input = self.supports_image_input
155
+
156
+ def cancel(self) -> None:
157
+ """Request cancellation of the in-flight turn."""
158
+
159
+ self._cancel_event.set()
160
+ self.tools.cancel_active()
161
+ if self._provider_event_task and not self._provider_event_task.done():
162
+ self._provider_event_task.cancel()
163
+
164
+ def clear(self) -> None:
165
+ """Drop the in-memory conversation history."""
166
+
167
+ self.messages = []
168
+
169
+ # ------------------------------------------------------------------
170
+ # Tool execution
171
+ # ------------------------------------------------------------------
172
+
173
+ async def _run_tool_call(self, tool_use: dict[str, Any]) -> AsyncIterator[Event]:
174
+ """Run one tool call and emit the standard tool events."""
175
+
176
+ tool_id = str(tool_use.get("id") or "")
177
+ name = str(tool_use.get("name") or "")
178
+ raw_args = tool_use.get("input")
179
+ args = raw_args if isinstance(raw_args, dict) else {}
180
+
181
+ yield Event("tool_start", {"tool_call": {"id": tool_id, "name": name, "input": args}})
182
+
183
+ if self._cancel_event.is_set():
184
+ yield self._tool_done_event(
185
+ tool_id,
186
+ ToolExecutionResult(
187
+ model_text="error: cancelled",
188
+ display_text="Cancelled",
189
+ is_error=True,
190
+ ),
191
+ )
192
+ return
193
+
194
+ spec = self.tools.get(name)
195
+ if spec is None:
196
+ yield self._tool_done_event(
197
+ tool_id,
198
+ ToolExecutionResult(
199
+ model_text=f"error: unknown tool: {name}",
200
+ display_text=f"Unknown tool: {name}",
201
+ is_error=True,
202
+ ),
203
+ )
204
+ return
205
+
206
+ if spec.streams_output:
207
+ async for event in self._run_streaming_tool(tool_id=tool_id, name=name, args=args):
208
+ yield event
209
+ return
210
+
211
+ try:
212
+ result = await asyncio.to_thread(self.tools.execute, name, args)
213
+ except Exception as exc: # pragma: no cover - defensive
214
+ result = ToolExecutionResult(
215
+ model_text=f"error: {exc}",
216
+ display_text=str(exc),
217
+ is_error=True,
218
+ )
219
+
220
+ yield self._tool_done_event(tool_id, result)
221
+
222
+ async def _run_streaming_tool(
223
+ self,
224
+ *,
225
+ tool_id: str,
226
+ name: str,
227
+ args: dict[str, Any],
228
+ ) -> AsyncIterator[Event]:
229
+ """Run one streaming tool, forwarding ``tool_output`` events live."""
230
+
231
+ loop = asyncio.get_running_loop()
232
+ output_queue: asyncio.Queue[str | None] = asyncio.Queue()
233
+
234
+ def on_output(line: str) -> None:
235
+ loop.call_soon_threadsafe(output_queue.put_nowait, line)
236
+
237
+ async def run_in_thread() -> ToolExecutionResult:
238
+ try:
239
+ return await asyncio.to_thread(
240
+ self.tools.execute,
241
+ name,
242
+ args,
243
+ tool_call_id=tool_id,
244
+ on_output=on_output,
245
+ )
246
+ finally:
247
+ loop.call_soon_threadsafe(output_queue.put_nowait, None)
248
+
249
+ task = asyncio.create_task(run_in_thread())
250
+ was_cancelled = False
251
+
252
+ while True:
253
+ if self._cancel_event.is_set() and not was_cancelled:
254
+ was_cancelled = True
255
+ self.tools.cancel_active()
256
+
257
+ try:
258
+ output = await asyncio.wait_for(output_queue.get(), timeout=0.1)
259
+ except TimeoutError:
260
+ if task.done():
261
+ break
262
+ continue
263
+
264
+ if output is None:
265
+ break
266
+ if not was_cancelled:
267
+ yield Event("tool_output", {"tool_use_id": tool_id, "output": output})
268
+
269
+ if was_cancelled:
270
+ try:
271
+ await task
272
+ except Exception:
273
+ pass
274
+ result = ToolExecutionResult(
275
+ model_text="error: cancelled",
276
+ display_text="Cancelled",
277
+ is_error=True,
278
+ )
279
+ else:
280
+ try:
281
+ result = await task
282
+ except Exception as exc: # pragma: no cover - defensive
283
+ result = ToolExecutionResult(
284
+ model_text=f"error: {exc}",
285
+ display_text=str(exc),
286
+ is_error=True,
287
+ )
288
+
289
+ yield self._tool_done_event(tool_id, result)
290
+
291
+ @staticmethod
292
+ def _tool_done_event(tool_id: str, result: ToolExecutionResult) -> Event:
293
+ data: dict[str, Any] = {
294
+ "tool_use_id": tool_id,
295
+ "model_text": result.model_text,
296
+ "display_text": result.display_text,
297
+ "is_error": result.is_error,
298
+ }
299
+ if result.content:
300
+ data["content"] = result.content
301
+ return Event("tool_done", data)
302
+
303
+ # ------------------------------------------------------------------
304
+ # Provider streaming
305
+ # ------------------------------------------------------------------
306
+
307
    async def _stream_provider_turn(
        self,
        adapter: ProviderAdapter,
        request: ProviderRequest,
    ) -> AsyncIterator[ProviderStreamEvent]:
        """Iterate one provider turn with best-effort cancellation support.

        Each read from the provider stream is wrapped in a task stored on
        ``self._provider_event_task`` so :meth:`cancel` can interrupt it
        mid-await. The underlying stream is always closed on exit.
        """

        provider_stream: AsyncIterator[ProviderStreamEvent] = adapter.stream_turn(request)

        async def next_provider_event() -> ProviderStreamEvent:
            return await anext(provider_stream)

        try:
            while True:
                # Bail out promptly between events once cancellation is requested.
                if self._cancel_event.is_set():
                    raise asyncio.CancelledError

                self._provider_event_task = asyncio.create_task(next_provider_event())
                try:
                    yield await self._provider_event_task
                except StopAsyncIteration:
                    # Stream exhausted — normal end of the turn.
                    return
                finally:
                    # Clear the slot so cancel() never touches a stale task.
                    self._provider_event_task = None
        finally:
            # Best-effort close; async generators expose ``aclose``, plain
            # iterators may not.
            close = cast(Callable[[], Awaitable[None]] | None, getattr(provider_stream, "aclose", None))
            if close is not None:
                try:
                    await close()
                except Exception:
                    pass
338
+
339
+ # ------------------------------------------------------------------
340
+ # Public entry point
341
+ # ------------------------------------------------------------------
342
+
343
    async def achat(
        self,
        user_input: str | ConversationMessage,
        *,
        on_persist: PersistCallback | None = None,
    ) -> AsyncIterator[Event]:
        """Run the full agent loop for one user message.

        Each turn asks the provider for one assistant message. If the assistant
        requests tools, the agent runs them locally, appends one user-side
        tool_result message, and continues until the assistant stops using tools.

        Every emitted message is appended to the session log. ``on_persist`` is
        an optional hook fired *before* that append, useful for staging related
        side effects (the web server uses it to land a rewind marker first).
        """

        async def persist(message: ConversationMessage) -> None:
            # Fire the caller's hook first, then append to the session log.
            if on_persist is not None:
                await on_persist(message)
            await self._store.append_message(
                self.session_id,
                message,
                provider=self.provider,
                model=self.model,
                cwd=self.cwd,
                api_base=self.api_base,
            )

        # Fresh turn: reset cancellation and snapshot capability flags.
        self._cancel_event.clear()
        supports_image_input = self.supports_image_input
        supports_pdf_input = self.supports_pdf_input
        self.tools.supports_image_input = supports_image_input

        if isinstance(user_input, str):
            user_message = user_text_message(user_input)
        else:
            # Sanitize a caller-supplied message down to plain dict blocks.
            user_message: ConversationMessage = {
                "role": str(user_input.get("role") or "user"),
                "content": [dict(b) for b in user_input.get("content") or [] if isinstance(b, dict)],
            }
            raw_meta = user_input.get("meta")
            if isinstance(raw_meta, dict):
                user_message["meta"] = {str(k): v for k, v in raw_meta.items()}

        if user_message.get("role") != "user":
            yield Event("error", {"message": "user input must be a user message"})
            return

        # Reject attachments the current model cannot consume.
        if not supports_image_input and any(
            isinstance(block, dict) and block.get("type") == "image" for block in user_message.get("content") or []
        ):
            yield Event("error", {"message": "current model does not support image input"})
            return
        if not supports_pdf_input and any(
            isinstance(block, dict) and block.get("type") == "document" for block in user_message.get("content") or []
        ):
            yield Event("error", {"message": "current model does not support PDF input"})
            return

        self.messages.append(user_message)
        await persist(user_message)

        adapter = get_provider_adapter(self.provider)

        turn_number = 0
        while self.max_turns is None or turn_number < self.max_turns:
            turn_number += 1
            if self._cancel_event.is_set():
                yield Event("error", {"message": "cancelled"})
                return

            assistant_message: ConversationMessage | None = None
            request = ProviderRequest(
                provider=self.provider,
                model=self.model,
                session_id=self.session_id,
                messages=self.messages,
                system=self.system,
                tools=self.tools.definitions,
                max_tokens=self.max_tokens,
                api_key=self.api_key,
                api_base=self.api_base,
                reasoning_effort=self.reasoning_effort,
                supports_image_input=supports_image_input,
                supports_pdf_input=supports_pdf_input,
            )

            try:
                # Phase 1: ask the provider for exactly one assistant turn.
                async for provider_event in self._stream_provider_turn(adapter, request):
                    if self._cancel_event.is_set():
                        yield Event("error", {"message": "cancelled"})
                        return

                    if provider_event.type == "thinking_delta":
                        delta_text = str(provider_event.data.get("text") or "")
                        if delta_text:
                            yield Event("reasoning", {"delta": delta_text})
                        continue

                    if provider_event.type == "text_delta":
                        delta_text = str(provider_event.data.get("text") or "")
                        if delta_text:
                            yield Event("text", {"delta": delta_text})
                        continue

                    if provider_event.type == "provider_error":
                        # Raised here so the generic handler below reports it.
                        raise ValueError(str(provider_event.data.get("message") or "provider error"))

                    if provider_event.type != "message_done":
                        continue

                    # message_done carries the complete assistant message.
                    message = provider_event.data.get("message")
                    if isinstance(message, dict):
                        assistant_message = message

            except asyncio.CancelledError:
                yield Event("error", {"message": "cancelled"})
                return
            except Exception as exc:
                logger.exception("Provider request failed")
                yield Event("error", {"message": str(exc)})
                return

            if not assistant_message:
                yield Event("error", {"message": "provider produced no assistant message"})
                return

            self.messages.append(assistant_message)
            await persist(assistant_message)

            # Phase 2: if the assistant requested tools, execute them locally and
            # append one user-side tool_result message before continuing.
            tool_calls = [
                block
                for block in assistant_message.get("content") or []
                if isinstance(block, dict) and block.get("type") == "tool_use"
            ]
            if not tool_calls:
                break

            tool_results: list[dict[str, Any]] = []
            for tool_call in tool_calls:
                async for event in self._run_tool_call(tool_call):
                    yield event

                    if event.type != "tool_done":
                        continue

                    d = event.data
                    model_text = str(d.get("model_text") or "")
                    content = d.get("content")
                    tool_results.append(
                        tool_result_block(
                            tool_use_id=str(d.get("tool_use_id") or ""),
                            model_text=model_text,
                            display_text=str(d.get("display_text") or ""),
                            is_error=bool(d.get("is_error")),
                            content=content if isinstance(content, list) else None,
                        )
                    )

                    # Cancelled mid-batch: persist what we have and stop the turn.
                    # NOTE(review): remaining tool_use blocks in this batch get no
                    # matching tool_result — presumably the providers tolerate the
                    # orphaned blocks on resume; confirm against the adapters.
                    if model_text == "error: cancelled" and self._cancel_event.is_set():
                        tool_result_message = build_message("user", tool_results)
                        self.messages.append(tool_result_message)
                        await persist(tool_result_message)
                        return

            tool_result_message = build_message("user", tool_results)
            self.messages.append(tool_result_message)
            await persist(tool_result_message)

        else:
            # while loop exhausted max_turns without breaking
            yield Event("error", {"message": "max_turns reached"})
            return

        # Turn completed normally (assistant stopped calling tools).
        # Check whether context compaction is needed.
        if not self._cancel_event.is_set():
            async for event in self._compact_if_needed(adapter, persist):
                yield event
526
+
527
+ # ------------------------------------------------------------------
528
+ # Context compaction
529
+ # ------------------------------------------------------------------
530
+
531
+ async def _compact_if_needed(
532
+ self,
533
+ adapter: ProviderAdapter,
534
+ persist: PersistCallback,
535
+ ) -> AsyncIterator[Event]:
536
+ """Check token usage and run compaction if above threshold."""
537
+
538
+ usage: dict[str, Any] | None = None
539
+ for message in reversed(self.messages):
540
+ if message.get("role") == "assistant":
541
+ usage = (message.get("meta") or {}).get("usage")
542
+ break
543
+
544
+ if not should_compact(usage, self.context_window, self.compact_threshold):
545
+ return
546
+
547
+ try:
548
+ async for event in self._compact(adapter, persist):
549
+ yield event
550
+ except (Exception, asyncio.CancelledError):
551
+ logger.warning("Context compaction failed, continuing without compaction", exc_info=True)
552
+
553
+ async def _compact(
554
+ self,
555
+ adapter: ProviderAdapter,
556
+ persist: PersistCallback,
557
+ ) -> AsyncIterator[Event]:
558
+ """Generate a conversation summary and replace in-memory messages."""
559
+
560
+ compacted_count = len(self.messages)
561
+
562
+ # Ask the same provider for a summary — no tools, just text generation.
563
+ compact_messages = list(self.messages) + [user_text_message(COMPACT_SUMMARY_PROMPT)]
564
+ request = ProviderRequest(
565
+ provider=self.provider,
566
+ model=self.model,
567
+ session_id=self.session_id,
568
+ messages=compact_messages,
569
+ system=self.system,
570
+ tools=[],
571
+ max_tokens=min(self.max_tokens, 8192),
572
+ api_key=self.api_key,
573
+ api_base=self.api_base,
574
+ supports_image_input=self.supports_image_input,
575
+ supports_pdf_input=self.supports_pdf_input,
576
+ )
577
+
578
+ summary_message: ConversationMessage | None = None
579
+ async for provider_event in self._stream_provider_turn(adapter, request):
580
+ if provider_event.type == "message_done":
581
+ msg = provider_event.data.get("message")
582
+ if isinstance(msg, dict):
583
+ summary_message = msg
584
+
585
+ if not summary_message:
586
+ logger.warning("Compaction produced no response")
587
+ return
588
+
589
+ summary_text = flatten_message_text(summary_message, include_thinking=False)
590
+ if not summary_text:
591
+ logger.warning("Compaction produced empty summary")
592
+ return
593
+
594
+ summary_usage = (summary_message.get("meta") or {}).get("usage")
595
+ compact_event = build_compact_event(
596
+ summary_text,
597
+ provider=self.provider,
598
+ model=self.model,
599
+ compacted_count=compacted_count,
600
+ usage=summary_usage,
601
+ )
602
+
603
+ # Persist the compact event (append-only — original messages stay in JSONL).
604
+ await persist(compact_event)
605
+
606
+ # Rebuild in-memory messages from the compact event.
607
+ self.messages.append(compact_event)
608
+ self.messages = apply_compact(self.messages)
609
+
610
+ yield Event(
611
+ "compact",
612
+ {
613
+ "message": f"Context compacted ({compacted_count} messages → summary)",
614
+ "compacted_count": compacted_count,
615
+ },
616
+ )