aury-agent 0.0.4__py3-none-any.whl → 0.0.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -82,11 +82,14 @@ class MessageContextProvider(BaseContextProvider):
                 type="truncated",
                 limit=self.max_messages,
             )
-            # Convert to LLM format
-            return [
-                {"role": m["role"], "content": m["content"]}
-                for m in messages
-            ]
+            # Convert to LLM format (include tool_call_id for tool messages)
+            result = []
+            for m in messages:
+                msg = {"role": m["role"], "content": m["content"]}
+                if m.get("tool_call_id"):
+                    msg["tool_call_id"] = m["tool_call_id"]
+                result.append(msg)
+            return result

        # No backend available
        logger.warning(
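
The new loop preserves tool_call_id when flattening stored messages to LLM wire format. A minimal standalone sketch of the behavior (the message list and IDs here are made up for illustration):

    # Assumed input shape: stored messages may carry an optional tool_call_id.
    messages = [
        {"role": "user", "content": "What is 6 * 7?", "tool_call_id": None},
        {"role": "tool", "content": "42", "tool_call_id": "call_1"},
    ]

    result = []
    for m in messages:
        msg = {"role": m["role"], "content": m["content"]}
        if m.get("tool_call_id"):  # only tool messages carry a real ID
            msg["tool_call_id"] = m["tool_call_id"]
        result.append(msg)

    # result[1] -> {"role": "tool", "content": "42", "tool_call_id": "call_1"}

Without the ID, OpenAI-style APIs cannot match a tool result back to the call that produced it.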
aury/agents/core/base.py CHANGED
@@ -18,6 +18,8 @@ from dataclasses import dataclass, field
 from enum import Enum
 from typing import Any, AsyncIterator, Callable, ClassVar, Literal, TYPE_CHECKING

+from .logging import logger
+

 class ToolInjectionMode(Enum):
     """How tools are provided to LLM.
@@ -139,6 +141,7 @@ class BaseAgent(ABC):
         """
         self._ctx = ctx
         self.config = config or AgentConfig()
+        logger.debug(f"{self.agent_type}Agent init, name={self.config.name}, invocation_id={ctx.invocation_id}, max_steps={self.config.max_steps}")

         # Abort signal (delegates to ctx.abort_*)
         self._abort = asyncio.Event()
@@ -237,12 +240,14 @@
             "stream_thinking": stream_thinking,
             "llm": llm,
         }
+        logger.info(f"{self.agent_type}Agent run start, invocation_id={self._ctx.invocation_id}, name={self.config.name}")

        # Auto-detect parent queue (nested agent call)
        # Skip if _force_own_queue is True (delegate tool needs to capture events)
        if not _force_own_queue:
            try:
                parent_queue = _emit_queue_var.get()
+                logger.debug(f"{self.agent_type}Agent: nested call detected, reusing parent queue, invocation_id={self._ctx.invocation_id}")
                # Has parent - passthrough mode, reuse parent queue
                # Still set our ctx (child agent has its own ctx)
                ctx_token = _set_current_ctx(self._ctx)
@@ -254,6 +259,7 @@
             except LookupError:
                 pass

+        logger.debug(f"{self.agent_type}Agent: creating own queue for this run, invocation_id={self._ctx.invocation_id}")
        # No parent - create own queue and yield events
        queue: asyncio.Queue[BlockEvent | ActionEvent] = asyncio.Queue()
        queue_token = _emit_queue_var.set(queue)
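
The queue auto-detection in the two hunks above follows the standard ContextVar pattern: ContextVar.get() raises LookupError when no value is set in the current context. A minimal sketch, with names mirroring the diff but the surrounding module assumed:

    import asyncio
    from contextvars import ContextVar

    _emit_queue_var: ContextVar["asyncio.Queue"] = ContextVar("emit_queue")

    def acquire_queue() -> tuple["asyncio.Queue", bool]:
        # Hypothetical helper, not part of the package.
        try:
            return _emit_queue_var.get(), True   # nested call: reuse parent queue
        except LookupError:
            queue: asyncio.Queue = asyncio.Queue()
            _emit_queue_var.set(queue)           # top-level call: own the queue
            return queue, False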
@@ -331,7 +337,11 @@

            # Check for exceptions in execution
            await exec_task
+            logger.info(f"{self.agent_type}Agent run completed successfully, invocation_id={self._ctx.invocation_id}")

+        except Exception as e:
+            logger.error(f"{self.agent_type}Agent run error, error={type(e).__name__}, invocation_id={self._ctx.invocation_id}", exc_info=True)
+            raise
        finally:
            _reset_current_ctx(ctx_token)
            _emit_queue_var.reset(queue_token)
@@ -354,6 +364,7 @@
         Args:
             abort_chain: If True, abort entire invocation chain (SubAgents too)
         """
+        logger.warning(f"{self.agent_type}Agent cancel requested, abort_chain={abort_chain}, invocation_id={self._ctx.invocation_id if self._ctx else 'N/A'}")
        self._abort.set()
        if self._ctx:
            if abort_chain:
@@ -12,6 +12,7 @@ from __future__ import annotations
 from typing import Any, TYPE_CHECKING

 from .base import AgentConfig
+from .logging import context_logger as logger

 if TYPE_CHECKING:
     from .base import BaseAgent
@@ -51,6 +52,7 @@ class AgentFactory:
             name: Name to register under
             agent_class: Agent class (must have unified constructor)
         """
+        logger.debug(f"AgentFactory.register: registering agent, name={name}, class={agent_class.__name__}")
        self._registry[name] = agent_class

    def register_class(self, agent_class: type["BaseAgent"]) -> None:
@@ -60,6 +62,7 @@
             agent_class: Agent class with 'name' class attribute
         """
         name = getattr(agent_class, 'name', agent_class.__name__)
+        logger.debug(f"AgentFactory.register_class: registering agent, name={name}, class={agent_class.__name__}")
        self._registry[name] = agent_class

    def register_all(self, *agent_classes: type["BaseAgent"]) -> None:
@@ -68,6 +71,7 @@
         Args:
             agent_classes: Agent classes with 'name' class attribute
         """
+        logger.debug(f"AgentFactory.register_all: registering {len(agent_classes)} agents")
        for agent_class in agent_classes:
            self.register_class(agent_class)

@@ -95,11 +99,13 @@
         """
         if agent_type not in self._registry:
             available = ", ".join(self._registry.keys()) or "none"
+            logger.error(f"AgentFactory.create: unknown agent type, type={agent_type}, available={available}, invocation_id={ctx.invocation_id}")
            raise KeyError(
                f"Unknown agent type: {agent_type}. Available: {available}"
            )

        agent_class = self._registry[agent_type]
+        logger.debug(f"AgentFactory.create: creating agent, type={agent_type}, class={agent_class.__name__}, invocation_id={ctx.invocation_id}")
        return agent_class(ctx, config)

    def create_subagent(
@@ -122,7 +128,9 @@
         Returns:
             Agent instance with child context
         """
+        logger.debug(f"AgentFactory.create_subagent: creating subagent, type={agent_type}, mode={mode}, parent_invocation_id={parent_ctx.invocation_id}")
        child_ctx = parent_ctx.create_child(agent_id=agent_type, mode=mode)
+        logger.debug(f"AgentFactory.create_subagent: child context created, child_invocation_id={child_ctx.invocation_id}")
        return self.create(agent_type, child_ctx, config)

    def list_types(self) -> list[str]:
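
Taken together, these hunks add tracing around a plain name-to-class registry. A condensed sketch of the registry contract (simplified from the hunks; the constructor signature is assumed):

    class AgentFactory:
        def __init__(self) -> None:
            self._registry: dict[str, type] = {}

        def register_class(self, agent_class: type) -> None:
            # Falls back to the class name when no 'name' attribute is set.
            name = getattr(agent_class, "name", agent_class.__name__)
            self._registry[name] = agent_class

        def create(self, agent_type: str, ctx, config=None):
            if agent_type not in self._registry:
                available = ", ".join(self._registry) or "none"
                raise KeyError(f"Unknown agent type: {agent_type}. Available: {available}")
            return self._registry[agent_type](ctx, config)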
@@ -8,6 +8,8 @@ from __future__ import annotations
 import asyncio
 from typing import Any, AsyncGenerator, TYPE_CHECKING

+from .logging import context_logger as logger
+
 if TYPE_CHECKING:
     from .types.block import BlockEvent

@@ -36,41 +38,51 @@ async def merge_agent_runs(
             print(event)
     """
     if not agent_runs:
+        logger.debug("merge_agent_runs: no agent runs provided")
        return

+    logger.debug(f"merge_agent_runs: starting merge with {len(agent_runs)} agent runs")
    sentinel = object()
    queue: asyncio.Queue[tuple[Any, asyncio.Event | None]] = asyncio.Queue()

-    async def process_agent(events: AsyncGenerator["BlockEvent", None]) -> None:
+    async def process_agent(events: AsyncGenerator["BlockEvent", None], agent_idx: int) -> None:
        """Process single agent's events."""
+        event_count = 0
        try:
            async for event in events:
+                event_count += 1
                # Create resume signal to wait for consumer
                resume_signal = asyncio.Event()
                await queue.put((event, resume_signal))
                # Wait for upstream to consume before generating more
                await resume_signal.wait()
        finally:
+            logger.debug(f"merge_agent_runs: agent #{agent_idx} completed with {event_count} events")
            # Mark this agent as finished
            await queue.put((sentinel, None))

    # Use TaskGroup for parallel execution (Python 3.11+)
    async with asyncio.TaskGroup() as tg:
-        for events in agent_runs:
-            tg.create_task(process_agent(events))
+        for idx, events in enumerate(agent_runs):
+            tg.create_task(process_agent(events, idx))

        sentinel_count = 0
+        total_events = 0
        # Run until all agents finished
        while sentinel_count < len(agent_runs):
            item, resume_signal = await queue.get()

            if item is sentinel:
                sentinel_count += 1
+                logger.debug(f"merge_agent_runs: agent finished, {sentinel_count}/{len(agent_runs)} agents done")
            else:
+                total_events += 1
                yield item
                # Signal agent to continue
                if resume_signal:
                    resume_signal.set()
+
+    logger.debug(f"merge_agent_runs: merge completed, total_events={total_events}")


 async def run_agents_parallel(
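
merge_agent_runs interleaves several agent event streams with one-item backpressure: a producer blocks on a resume Event until the consumer has yielded its item. A self-contained sketch of the same pattern (Python 3.11+ for asyncio.TaskGroup; generic names, not the package's API):

    import asyncio
    from typing import Any, AsyncGenerator

    async def merge(gens: list[AsyncGenerator[Any, None]]):
        sentinel = object()
        queue: asyncio.Queue = asyncio.Queue()

        async def pump(gen: AsyncGenerator[Any, None]) -> None:
            try:
                async for item in gen:
                    resume = asyncio.Event()
                    await queue.put((item, resume))
                    await resume.wait()            # backpressure: wait for consumer
            finally:
                await queue.put((sentinel, None))  # this producer is done

        async with asyncio.TaskGroup() as tg:
            for gen in gens:
                tg.create_task(pump(gen))
            done = 0
            while done < len(gens):
                item, resume = await queue.get()
                if item is sentinel:
                    done += 1
                else:
                    yield item
                    resume.set()

Note the consumer loop runs inside the TaskGroup block; draining the queue there is what lets the producer tasks finish.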
@@ -105,24 +117,30 @@ async def run_agents_parallel(
             timeout=60.0,
         )
     """
+    logger.debug(f"run_agents_parallel: starting {len(agent_tasks)} agents, timeout={timeout}")
    results: dict[str, Any] = {}

    async def run_one(name: str, input: Any) -> tuple[str, Any]:
        try:
+            logger.debug(f"run_agents_parallel: running agent, name={name}")
            result = await agent_runner(name, input)
+            logger.info(f"run_agents_parallel: agent completed, name={name}")
            return (name, result)
        except Exception as e:
+            logger.error(f"run_agents_parallel: agent failed, name={name}, error={type(e).__name__}", exc_info=True)
            return (name, {"error": str(e)})

    tasks = [run_one(name, input) for name, input in agent_tasks]

    if timeout:
        try:
+            logger.debug(f"run_agents_parallel: waiting for agents with timeout={timeout}s")
            completed = await asyncio.wait_for(
                asyncio.gather(*tasks, return_exceptions=True),
                timeout=timeout,
            )
        except asyncio.TimeoutError:
+            logger.warning(f"run_agents_parallel: timeout after {timeout}s, marking incomplete agents as timed out")
            # Return partial results with timeout error for incomplete
            for name, _ in agent_tasks:
                if name not in results:
@@ -137,8 +155,9 @@
                 results[name] = result
             elif isinstance(item, Exception):
                 # Exception from gather
-                pass
+                logger.warning(f"run_agents_parallel: gathered exception: {type(item).__name__}")

+    logger.info(f"run_agents_parallel: completed, total_agents={len(agent_tasks)}, success={len(results)}, errors={len([r for r in results.values() if isinstance(r, dict) and 'error' in r])}")
    return results


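The timeout path wraps asyncio.gather(..., return_exceptions=True) in asyncio.wait_for, then backfills an error entry for any agent that never reported. A reduced sketch of that shape (the package also records partial results, elided here):

    import asyncio
    from typing import Any, Awaitable, Callable

    async def run_all(
        agent_tasks: list[tuple[str, Any]],
        agent_runner: Callable[[str, Any], Awaitable[Any]],
        timeout: float | None = None,
    ) -> dict[str, Any]:
        results: dict[str, Any] = {}

        async def run_one(name: str, arg: Any) -> tuple[str, Any]:
            try:
                return name, await agent_runner(name, arg)
            except Exception as e:          # per-agent failure becomes an error result
                return name, {"error": str(e)}

        coros = [run_one(n, a) for n, a in agent_tasks]
        try:
            completed = await asyncio.wait_for(
                asyncio.gather(*coros, return_exceptions=True), timeout=timeout
            )
        except asyncio.TimeoutError:
            for name, _ in agent_tasks:     # mark everything unfinished as timed out
                results.setdefault(name, {"error": "timeout"})
            return results
        for item in completed:
            if isinstance(item, tuple):
                name, result = item
                results[name] = result
        return results
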
@@ -164,17 +183,20 @@ class ParallelSubAgentContext:
         """Create isolated branch for sub-agent."""
         branch_id = f"{self.parent_invocation_id}.{agent_name}"
         self.branches[agent_name] = branch_id
+        logger.debug(f"ParallelSubAgentContext: created branch, agent={agent_name}, branch_id={branch_id}")
        return branch_id

    def mark_completed(self, agent_name: str, result: Any) -> None:
        """Mark agent as completed with result."""
        self._completed.add(agent_name)
        self.results[agent_name] = result
+        logger.info(f"ParallelSubAgentContext: agent completed, agent={agent_name}, pending={len(self.pending_agents)}")

    def mark_failed(self, agent_name: str, error: str) -> None:
        """Mark agent as failed with error."""
        self._completed.add(agent_name)
        self.errors[agent_name] = error
+        logger.warning(f"ParallelSubAgentContext: agent failed, agent={agent_name}, error={error}")

    @property
    def all_completed(self) -> bool:
aury/agents/core/state.py CHANGED
@@ -273,8 +273,18 @@ class State:
         if not self._dirty:
             return

+        from ..core.logging import context_logger as logger
+
        # Use invocation_id if set, otherwise fall back to session_id
        key = self._invocation_id or self._session_id
+        logger.debug(
+            "Checkpointing state",
+            extra={
+                "session_id": self._session_id,
+                "invocation_id": self._invocation_id,
+                "partitions": list(self._data.keys()),
+            },
+        )
        await self._backend.set("state", key, self._data)
        self._dirty = False

@@ -287,9 +297,19 @@
         Returns:
             True if state was restored, False if no saved state
         """
+        from ..core.logging import context_logger as logger
+
        key = invocation_id or self._invocation_id or self._session_id
        data = await self._backend.get("state", key)
        if data:
+            logger.info(
+                "State restored from backend",
+                extra={
+                    "session_id": self._session_id,
+                    "invocation_id": invocation_id or self._invocation_id,
+                    "partitions": list(data.keys()),
+                },
+            )
            self._data = data
            # Ensure all partitions exist
            for partition in ("vars", "data", "execution"):
@@ -299,6 +319,11 @@
             if invocation_id:
                 self._invocation_id = invocation_id
             return True
+        else:
+            logger.debug(
+                "No saved state found",
+                extra={"session_id": self._session_id, "key": key},
+            )
        return False

    # ========== Utility ==========
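
Both checkpoint and restore key on invocation_id when available, else session_id. A sketch of the keying with a dict-backed stand-in for the storage backend (the real backend interface is assumed from the set/get calls above):

    class DictBackend:
        # Stand-in for the package's async storage backend.
        def __init__(self) -> None:
            self._store: dict[tuple[str, str], dict] = {}

        async def set(self, ns: str, key: str, value: dict) -> None:
            self._store[(ns, key)] = value

        async def get(self, ns: str, key: str) -> dict | None:
            return self._store.get((ns, key))

    async def checkpoint(backend: DictBackend, data: dict,
                         session_id: str, invocation_id: str | None) -> None:
        key = invocation_id or session_id   # same fallback as State.checkpoint
        await backend.set("state", key, data)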
@@ -198,6 +198,7 @@ class ToolConfig:
     timeout: float | None = None  # Execution timeout in seconds
     requires_permission: bool = False  # Needs HITL approval
     permission_message: str | None = None
+    stream_arguments: bool = False  # Stream tool arguments to client

     # Retry configuration
     max_retries: int = 0  # 0 = no retry
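
The new stream_arguments flag sits alongside the existing per-tool knobs. A usage sketch with a dataclass replica of the fields shown here (replica for illustration only; import the real ToolConfig from the package):

    from dataclasses import dataclass

    @dataclass
    class ToolConfig:  # replica of the fields visible in this hunk
        timeout: float | None = None
        requires_permission: bool = False
        permission_message: str | None = None
        stream_arguments: bool = False  # new in 0.0.5
        max_retries: int = 0

    cfg = ToolConfig(requires_permission=True, stream_arguments=True)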
@@ -85,8 +85,19 @@ class AskUserTool(BaseTool):
             # If no context, return error (should not happen in normal use)
             return ToolResult.error("Cannot ask user without execution context")

+        from ..core.logging import tool_logger as logger
+
        # Generate request ID
        request_id = generate_id("req")
+        logger.info(
+            "ask_user HITL request",
+            extra={
+                "invocation_id": ctx.invocation_id,
+                "request_id": request_id,
+                "question": question[:100],
+                "has_options": options is not None,
+            },
+        )

        # Create HITL request data
        request = HITLRequest(
@@ -100,6 +111,10 @@

        # Checkpoint current state
        if hasattr(ctx, "state") and ctx.state is not None:
+            logger.debug(
+                "Checkpointing state before HITL suspend",
+                extra={"invocation_id": ctx.invocation_id},
+            )
            await ctx.state.checkpoint()

        # Update invocation status to SUSPENDED
@@ -124,6 +139,13 @@
         ))

        # Raise signal to suspend execution
+        logger.info(
+            "Suspending execution for HITL ask_user",
+            extra={
+                "invocation_id": ctx.invocation_id,
+                "request_id": request_id,
+            },
+        )
        raise HITLSuspend(
            request_id=request_id,
            request_type="ask_user",
@@ -184,7 +206,18 @@ class ConfirmTool(BaseTool):
         if ctx is None:
             return ToolResult.error("Cannot confirm without execution context")

+        from ..core.logging import tool_logger as logger
+
        request_id = generate_id("req")
+        logger.info(
+            "confirm HITL request",
+            extra={
+                "invocation_id": ctx.invocation_id,
+                "request_id": request_id,
+                "action": action[:100],
+                "risk_level": risk_level,
+            },
+        )

        message = f"Confirm: {action}"
        if details:
@@ -205,6 +238,10 @@

        # Checkpoint
        if hasattr(ctx, "state") and ctx.state is not None:
+            logger.debug(
+                "Checkpointing state before confirm suspend",
+                extra={"invocation_id": ctx.invocation_id},
+            )
            await ctx.state.checkpoint()

        # Update invocation
@@ -229,6 +266,13 @@
             },
         ))

+        logger.info(
+            "Suspending execution for confirm",
+            extra={
+                "invocation_id": ctx.invocation_id,
+                "request_id": request_id,
+            },
+        )
        raise HITLSuspend(
            request_id=request_id,
            request_type="confirm",
@@ -4,6 +4,7 @@ from __future__ import annotations
 import json
 from typing import Any, AsyncIterator

+from ..core.logging import context_logger as logger
 from .provider import (
     LLMProvider,
     LLMEvent,
@@ -75,6 +76,7 @@ class ModelClientProvider:
                 "Please install it: pip install aury-ai-model[all]"
             )

+        logger.debug(f"ModelClientProvider init, provider={provider}, model={model}")
        self._provider_name = provider
        self._model_name = model
        self._capabilities = capabilities or Capabilities()
@@ -100,6 +102,7 @@
             if k not in client_kwargs
         }
         self._call_count = 0
+        logger.debug(f"ModelClientProvider initialized, provider={provider}, model={model}")

    @property
    def provider(self) -> str:
@@ -119,9 +122,15 @@
         """Get model capabilities."""
         return self._capabilities

-    def _convert_messages(self, messages: list[LLMMessage]) -> list[Message]:
+    def _convert_messages(
+        self,
+        messages: list[LLMMessage],
+        enable_thinking: bool = False,
+    ) -> list[Message]:
        """Convert LLMMessage to aury-ai-model Message.

+        Use simple OpenAI format - let API gateway handle Claude conversion.
+
        Supports all message types from aury.ai.model:
        - system: msg.system(text)
        - user: msg.user(text, images=[])
@@ -157,45 +166,65 @@
                 ))

            elif m.role == "assistant":
+                # Extract text, thinking, and tool_calls from content
+                # IMPORTANT: For Claude thinking mode, thinking must be included in history
+                # Claude API requires assistant messages to start with thinking block when thinking is enabled
+                text_content = None
+                thinking_content = None
+                tool_calls = None
+
                if isinstance(m.content, str):
-                    result.append(msg.assistant(m.content))
-                else:
-                    # Handle tool calls in assistant message
+                    text_content = m.content
+                elif isinstance(m.content, list):
+                    # Extract text, thinking, and tool_use from content parts
                    text_parts = []
-                    tool_calls = []
+                    thinking_parts = []
+                    tool_call_list = []
                    for part in m.content:
                        if isinstance(part, dict):
                            if part.get("type") == "text":
                                text_parts.append(part.get("text", ""))
+                            elif part.get("type") == "thinking":
+                                # Include thinking for Claude API compatibility
+                                thinking_parts.append(part.get("thinking", ""))
                            elif part.get("type") == "tool_use":
-                                tool_calls.append(ModelToolCall(
+                                tool_call_list.append(ModelToolCall(
                                    id=part.get("id", ""),
                                    name=part.get("name", ""),
                                    arguments_json=json.dumps(part.get("input", {})),
                                ))
-                    result.append(msg.assistant(
-                        text=" ".join(text_parts) if text_parts else None,
-                        tool_calls=tool_calls if tool_calls else None,
-                    ))
+                    text_content = " ".join(text_parts) if text_parts else None
+                    thinking_content = "".join(thinking_parts) if thinking_parts else None
+                    tool_calls = tool_call_list if tool_call_list else None
+
+                result.append(msg.assistant(
+                    text=text_content,
+                    thinking=thinking_content,
+                    tool_calls=tool_calls,
+                ))

            elif m.role == "tool":
-                # Tool result message - two formats supported:
-                # 1. Simple: LLMMessage(role="tool", content="result", tool_call_id="xxx")
-                # 2. List format: content=[{"type": "tool_result", "content": "...", "tool_use_id": "..."}]
-                if m.tool_call_id and isinstance(m.content, str):
-                    # Simple format
+                # Tool result message
+                # Handle both string content (OpenAI) and list content (Anthropic)
+                tool_call_id = m.tool_call_id
+                tool_content = None
+
+                if isinstance(m.content, str):
+                    tool_content = m.content
+                elif isinstance(m.content, list) and len(m.content) > 0:
+                    # Anthropic format: [{'type': 'tool_result', 'tool_use_id': '...', 'content': '...'}]
+                    first_item = m.content[0]
+                    if isinstance(first_item, dict):
+                        tool_content = first_item.get("content", "")
+                        # Also extract tool_call_id if not already set
+                        if not tool_call_id:
+                            tool_call_id = first_item.get("tool_use_id") or first_item.get("tool_call_id")
+
+                if tool_call_id and tool_content is not None:
                    result.append(msg.tool(
-                        result=m.content,
-                        tool_call_id=m.tool_call_id,
+                        result=tool_content,
+                        tool_call_id=tool_call_id,
                    ))
-                elif isinstance(m.content, list):
-                    # List format (for compatibility)
-                    for part in m.content:
-                        if isinstance(part, dict) and part.get("type") == "tool_result":
-                            result.append(msg.tool(
-                                result=str(part.get("content", "")),
-                                tool_call_id=part.get("tool_use_id", ""),
-                            ))

        return result

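The rewritten tool branch normalizes two wire formats into one (tool_call_id, content) pair. A standalone sketch of just that normalization:

    def normalize_tool_message(content, tool_call_id=None):
        # String content: OpenAI style, the ID must come from the message itself.
        if isinstance(content, str):
            return tool_call_id, content
        # List content: Anthropic style, the ID can live inside the first block.
        if isinstance(content, list) and content:
            first = content[0]
            if isinstance(first, dict):
                tool_call_id = (tool_call_id
                                or first.get("tool_use_id")
                                or first.get("tool_call_id"))
                return tool_call_id, first.get("content", "")
        return tool_call_id, None

    # normalize_tool_message(
    #     [{"type": "tool_result", "tool_use_id": "t1", "content": "ok"}]
    # ) -> ("t1", "ok")
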
@@ -309,7 +338,7 @@
         LLMEvent: Streaming events (content, thinking, tool_call, usage, completed, error)
         """
         # Convert messages and tools
-        model_messages = self._convert_messages(messages)
+        model_messages = self._convert_messages(messages, enable_thinking=enable_thinking)
        model_tools = self._convert_tools(tools)

        # Merge kwargs
aury/agents/llm/openai.py CHANGED
@@ -121,9 +121,13 @@ class OpenAIProvider:
             LLMEvent objects
         """
         # Build request payload
+        converted_msgs = self._convert_messages(messages)
+        print(f"[DEBUG OpenAIProvider] Sending {len(converted_msgs)} messages to {self._model}")
+        for i, msg in enumerate(converted_msgs):
+            print(f"[DEBUG] Message {i}: role={msg.get('role')}, content={str(msg.get('content'))[:100]}")
        payload: dict[str, Any] = {
            "model": self._model,
-            "messages": self._convert_messages(messages),
+            "messages": converted_msgs,
            "stream": True,
            "stream_options": {"include_usage": True},
        }
@@ -77,6 +77,15 @@ class MemoryManager:
         if not messages:
             return

+        logger.info(
+            "Memory auto-save on invocation end",
+            extra={
+                "invocation_id": payload.get("invocation_id"),
+                "session_id": payload.get("session_id"),
+                "message_count": len(messages),
+            },
+        )
+
        content = self._format_messages(messages)

        await self.add(
@@ -120,9 +129,14 @@

         Returns entry ID or None if filtered out.
         """
-        logger.debug(
+        logger.info(
            "Adding to memory",
-            extra={"trigger": trigger.value, "session_id": session_id}
+            extra={
+                "trigger": trigger.value,
+                "session_id": session_id,
+                "invocation_id": invocation_id,
+                "content_length": len(content),
+            },
        )
        entry = MemoryEntry(
            id=str(uuid4()),
@@ -141,9 +155,17 @@

        # 1. Apply filters
        for filter in self.write_filters:
+            logger.debug(
+                "Applying memory write filter",
+                extra={"filter": type(filter).__name__, "invocation_id": invocation_id},
+            )
            result = await filter.filter(entries, write_context)

            if result.decision == WriteDecision.SKIP:
+                logger.debug(
+                    "Memory entry skipped by filter",
+                    extra={"filter": type(filter).__name__, "invocation_id": invocation_id},
+                )
                return None
            elif result.decision == WriteDecision.TRANSFORM:
                entries = result.entries or []
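
The write-filter loop being instrumented follows a skip-or-transform pipeline. A sketch with stand-ins for WriteDecision and the filter protocol (names inferred from this hunk, not verified against the full source):

    from enum import Enum

    class WriteDecision(Enum):  # stand-in
        KEEP = "keep"
        SKIP = "skip"
        TRANSFORM = "transform"

    async def apply_write_filters(entries, write_filters, write_context):
        for f in write_filters:
            result = await f.filter(entries, write_context)
            if result.decision == WriteDecision.SKIP:
                return None                       # drop the write entirely
            if result.decision == WriteDecision.TRANSFORM:
                entries = result.entries or []    # continue with transformed entries
        return entries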
@@ -179,6 +201,15 @@

        Searches all configured sources and merges results.
        """
+        logger.info(
+            "Searching memory",
+            extra={
+                "query_length": len(query),
+                "limit": limit,
+                "source_count": len(self.retrieval_config),
+            },
+        )
+
        # 1. Search all sources
        all_results: dict[str, list[ScoredEntry]] = {}