todo-agent 0.1.0__py3-none-any.whl → 0.2.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
todo_agent/_version.py CHANGED
@@ -28,7 +28,7 @@ version_tuple: VERSION_TUPLE
 commit_id: COMMIT_ID
 __commit_id__: COMMIT_ID
 
-__version__ = version = '0.1.0'
-__version_tuple__ = version_tuple = (0, 1, 0)
+__version__ = version = '0.2.1'
+__version_tuple__ = version_tuple = (0, 2, 1)
 
 __commit_id__ = commit_id = None
todo_agent/core/__init__.py CHANGED
@@ -9,8 +9,8 @@ from .exceptions import InvalidTaskFormatError, TaskNotFoundError, TodoError
 from .todo_manager import TodoManager
 
 __all__ = [
-    "TodoManager",
-    "TodoError",
-    "TaskNotFoundError",
     "InvalidTaskFormatError",
+    "TaskNotFoundError",
+    "TodoError",
+    "TodoManager",
 ]
todo_agent/core/conversation_manager.py CHANGED
@@ -3,14 +3,14 @@ Conversation management for todo.sh LLM agent.
 """
 
 import time
-from typing import List, Dict, Optional
 from dataclasses import dataclass
 from enum import Enum
+from typing import Any, Dict, List, Optional
 
 try:
     from todo_agent.infrastructure.token_counter import get_token_counter
 except ImportError:
-    from infrastructure.token_counter import get_token_counter
+    from infrastructure.token_counter import get_token_counter  # type: ignore[no-redef]
 
 
 class MessageRole(Enum):
@@ -76,7 +76,7 @@ class ConversationManager:
         self._total_tokens += token_count
         self._trim_if_needed()
 
-    def get_messages(self, include_tool_calls: bool = True) -> List[Dict[str, str]]:
+    def get_messages(self, include_tool_calls: bool = True) -> List[Dict[str, Any]]:
         """
         Get conversation messages in OpenRouter API format.
 
@@ -86,14 +86,17 @@
         Returns:
             List of message dictionaries for API consumption
         """
-        messages = []
+        messages: List[Dict[str, Any]] = []
 
         # Add conversation messages (system prompt is already in history)
         for msg in self.history:
             if msg.role == MessageRole.TOOL and not include_tool_calls:
                 continue
 
-            message_dict = {"role": msg.role.value, "content": msg.content}
+            message_dict: Dict[str, Any] = {
+                "role": msg.role.value,
+                "content": msg.content,
+            }
 
             # Handle tool calls in assistant messages
             if msg.role == MessageRole.ASSISTANT and msg.tool_calls:
@@ -148,8 +151,11 @@
         system_messages = [
             msg for msg in self.history if msg.role == MessageRole.SYSTEM
         ]
-        recent_messages = self.history[-self.max_messages:]
-        self.history = system_messages + recent_messages[-self.max_messages + len(system_messages):]
+        recent_messages = self.history[-self.max_messages :]
+        self.history = (
+            system_messages
+            + recent_messages[-self.max_messages + len(system_messages) :]
+        )
 
         # Recalculate total tokens after message count trimming
         self._recalculate_total_tokens()
@@ -213,7 +219,7 @@
         self.history.insert(0, system_message)
         self._total_tokens += token_count
 
-    def get_conversation_summary(self) -> Dict[str, any]:
+    def get_conversation_summary(self) -> Dict[str, Any]:
         """
         Get conversation statistics and summary.
 
@@ -235,13 +241,17 @@
 
         thinking_stats = {}
         if thinking_times:
-            thinking_stats = {
-                "total_thinking_time": sum(thinking_times),
-                "average_thinking_time": sum(thinking_times) / len(thinking_times),
-                "min_thinking_time": min(thinking_times),
-                "max_thinking_time": max(thinking_times),
-                "thinking_time_count": len(thinking_times),
-            }
+            # Filter out None values for calculations
+            valid_thinking_times = [t for t in thinking_times if t is not None]
+            if valid_thinking_times:
+                thinking_stats = {
+                    "total_thinking_time": sum(valid_thinking_times),
+                    "average_thinking_time": sum(valid_thinking_times)
+                    / len(valid_thinking_times),
+                    "min_thinking_time": min(valid_thinking_times),
+                    "max_thinking_time": max(valid_thinking_times),
+                    "thinking_time_count": len(valid_thinking_times),
+                }
 
         return {
             "total_messages": len(self.history),
@@ -256,12 +266,24 @@
                 [msg for msg in self.history if msg.role == MessageRole.TOOL]
             ),
             "oldest_message": (
-                min([msg.timestamp for msg in messages_with_timestamps])
+                min(
+                    [
+                        msg.timestamp
+                        for msg in messages_with_timestamps
+                        if msg.timestamp is not None
+                    ]
+                )
                 if messages_with_timestamps
                 else None
            ),
            "newest_message": (
-                max([msg.timestamp for msg in messages_with_timestamps])
+                max(
+                    [
+                        msg.timestamp
+                        for msg in messages_with_timestamps
+                        if msg.timestamp is not None
+                    ]
+                )
                 if messages_with_timestamps
                 else None
            ),
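Two patterns recur across this release. First, the fallback imports now carry `# type: ignore[no-redef]`: when both branches of a try/except import bind the same name, mypy flags the second binding as a redefinition, and the comment suppresses that. A minimal reproduction, with hypothetical module names:

try:
    from mypkg.token_counter import get_token_counter
except ImportError:
    from token_counter import get_token_counter  # type: ignore[no-redef]

Second, the history-trimming hunk only reflows the existing slice arithmetic to satisfy line-length rules; behavior is unchanged. A standalone sketch of that logic, using a hypothetical trim_history over plain (role, content) tuples in place of the package's message objects:

from typing import List, Tuple


def trim_history(
    history: List[Tuple[str, str]], max_messages: int
) -> List[Tuple[str, str]]:
    # Keep every system message, then fill the remaining budget with the
    # most recent messages (assumes system prompts sit at the start of
    # history, as they do in the diff).
    system_messages = [m for m in history if m[0] == "system"]
    recent_messages = history[-max_messages:]
    return system_messages + recent_messages[-max_messages + len(system_messages):]


history = [("system", "prompt")] + [("user", f"msg {i}") for i in range(10)]
print(trim_history(history, max_messages=4))
# [('system', 'prompt'), ('user', 'msg 7'), ('user', 'msg 8'), ('user', 'msg 9')]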
todo_agent/core/todo_manager.py CHANGED
@@ -2,14 +2,14 @@
 Todo.sh operations orchestration and business logic.
 """
 
-from typing import Optional
 from datetime import datetime
+from typing import Any, Optional
 
 
 class TodoManager:
     """Orchestrates todo.sh operations with business logic."""
 
-    def __init__(self, todo_shell):
+    def __init__(self, todo_shell: Any) -> None:
         self.todo_shell = todo_shell
 
     def add_task(
@@ -23,20 +23,20 @@ class TodoManager:
         """Add new task with explicit project/context parameters."""
         # Build the full task description with priority, project, and context
         full_description = description
-
+
         if priority:
             full_description = f"({priority}) {full_description}"
-
+
         if project:
             full_description = f"{full_description} +{project}"
-
+
         if context:
             full_description = f"{full_description} @{context}"
-
+
         if due:
             full_description = f"{full_description} due:{due}"
-
-        result = self.todo_shell.add(full_description)
+
+        self.todo_shell.add(full_description)
         return f"Added task: {full_description}"
 
     def list_tasks(self, filter: Optional[str] = None) -> str:
@@ -44,7 +44,7 @@
         result = self.todo_shell.list_tasks(filter)
         if not result.strip():
             return "No tasks found."
-
+
         # Return the raw todo.txt format for the LLM to format conversationally
         # The LLM will convert this into natural language in its response
         return result
@@ -54,14 +54,22 @@
         result = self.todo_shell.complete(task_number)
         return f"Completed task {task_number}: {result}"
 
-    def get_overview(self, **kwargs) -> str:
+    def get_overview(self, **kwargs: Any) -> str:
         """Show current task statistics."""
         tasks = self.todo_shell.list_tasks()
         completed = self.todo_shell.list_completed()
-
-        task_count = len([line for line in tasks.split('\n') if line.strip()]) if tasks.strip() else 0
-        completed_count = len([line for line in completed.split('\n') if line.strip()]) if completed.strip() else 0
-
+
+        task_count = (
+            len([line for line in tasks.split("\n") if line.strip()])
+            if tasks.strip()
+            else 0
+        )
+        completed_count = (
+            len([line for line in completed.split("\n") if line.strip()])
+            if completed.strip()
+            else 0
+        )
+
         return f"Task Overview:\n- Active tasks: {task_count}\n- Completed tasks: {completed_count}"
 
     def replace_task(self, task_number: int, new_description: str) -> str:
@@ -97,14 +105,14 @@
         result = self.todo_shell.remove_priority(task_number)
         return f"Removed priority from task {task_number}: {result}"
 
-    def list_projects(self, **kwargs) -> str:
+    def list_projects(self, **kwargs: Any) -> str:
         """List all available projects in todo.txt."""
         result = self.todo_shell.list_projects()
         if not result.strip():
             return "No projects found."
         return result
 
-    def list_contexts(self, **kwargs) -> str:
+    def list_contexts(self, **kwargs: Any) -> str:
         """List all available contexts in todo.txt."""
         result = self.todo_shell.list_contexts()
         if not result.strip():
@@ -112,17 +120,17 @@
         return result
 
     def list_completed_tasks(
-        self,
+        self,
         filter: Optional[str] = None,
         project: Optional[str] = None,
         context: Optional[str] = None,
         text_search: Optional[str] = None,
         date_from: Optional[str] = None,
         date_to: Optional[str] = None,
-        **kwargs
+        **kwargs: Any,
     ) -> str:
         """List completed tasks with optional filtering.
-
+
         Args:
             filter: Raw filter string (e.g., '+work', '@office')
             project: Filter by project (without + symbol)
@@ -133,19 +141,19 @@
         """
         # Build filter string from individual parameters
         filter_parts = []
-
+
         if filter:
             filter_parts.append(filter)
-
+
         if project:
             filter_parts.append(f"+{project}")
-
+
         if context:
             filter_parts.append(f"@{context}")
-
+
         if text_search:
             filter_parts.append(text_search)
-
+
         # Handle date filtering - todo.sh supports direct date pattern matching
         # LIMITATIONS: Due to todo.sh constraints, complex date ranges are not supported.
         # The filtering behavior is:
@@ -164,31 +172,33 @@
             # For end date only, we'll use the year-month pattern
             # This will match all tasks in that month
             filter_parts.append(date_to[:7])  # YYYY-MM format
-
+
         # Combine all filters
         combined_filter = " ".join(filter_parts) if filter_parts else None
-
+
         result = self.todo_shell.list_completed(combined_filter)
         if not result.strip():
             return "No completed tasks found matching the criteria."
         return result
 
-    def move_task(self, task_number: int, destination: str, source: Optional[str] = None) -> str:
+    def move_task(
+        self, task_number: int, destination: str, source: Optional[str] = None
+    ) -> str:
         """Move task from source to destination file."""
         result = self.todo_shell.move(task_number, destination, source)
         return f"Moved task {task_number} to {destination}: {result}"
 
-    def archive_tasks(self, **kwargs) -> str:
+    def archive_tasks(self, **kwargs: Any) -> str:
         """Archive completed tasks."""
         result = self.todo_shell.archive()
         return f"Archived tasks: {result}"
 
-    def deduplicate_tasks(self, **kwargs) -> str:
+    def deduplicate_tasks(self, **kwargs: Any) -> str:
         """Remove duplicate tasks."""
         result = self.todo_shell.deduplicate()
         return f"Deduplicated tasks: {result}"
 
-    def get_current_datetime(self, **kwargs) -> str:
+    def get_current_datetime(self, **kwargs: Any) -> str:
         """Get the current date and time."""
         now = datetime.now()
         return f"Current date and time: {now.strftime('%Y-%m-%d %H:%M:%S')} ({now.strftime('%A, %B %d, %Y at %I:%M %p')})"
todo_agent/infrastructure/__init__.py CHANGED
@@ -8,4 +8,4 @@ from .config import Config
 from .openrouter_client import OpenRouterClient
 from .todo_shell import TodoShell
 
-__all__ = ["OpenRouterClient", "TodoShell", "Config"]
+__all__ = ["Config", "OpenRouterClient", "TodoShell"]
todo_agent/infrastructure/config.py CHANGED
@@ -11,18 +11,18 @@ class Config:
     DEFAULT_MODEL = "openai/gpt-4o-mini"
     # DEFAULT_MODEL = "mistralai/mistral-small-3.1-24b-instruct"
 
-    def __init__(self):
+    def __init__(self) -> None:
         # Provider selection
         self.provider = os.getenv("LLM_PROVIDER", "openrouter")
-
+
         # OpenRouter configuration
         self.openrouter_api_key = os.getenv("OPENROUTER_API_KEY")
         self.openrouter_model = os.getenv("OPENROUTER_MODEL", self.DEFAULT_MODEL)
-
+
         # Ollama configuration
         self.ollama_base_url = os.getenv("OLLAMA_BASE_URL", "http://localhost:11434")
         self.ollama_model = os.getenv("OLLAMA_MODEL", "mistral-small3.1")
-
+
         # Common configuration
         self.model = self._get_model_for_provider()
         self.log_level = os.getenv("LOG_LEVEL", "INFO")
@@ -40,7 +40,9 @@
         """Validate required configuration."""
         if self.provider == "openrouter":
             if not self.openrouter_api_key:
-                raise ValueError("OPENROUTER_API_KEY environment variable is required for OpenRouter provider")
+                raise ValueError(
+                    "OPENROUTER_API_KEY environment variable is required for OpenRouter provider"
+                )
         elif self.provider == "ollama":
             # Ollama doesn't require API key, but we could validate the base URL is reachable
             pass
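Everything in Config comes from environment variables; the hunk above only rewraps the existing ValueError. As a quick reference, the variables this file reads (names taken from the diff, values are placeholders):

import os

os.environ["LLM_PROVIDER"] = "openrouter"              # or "ollama"
os.environ["OPENROUTER_API_KEY"] = "placeholder-key"   # required when provider is openrouter
os.environ["OPENROUTER_MODEL"] = "openai/gpt-4o-mini"  # optional; default shown in the diff
# Ollama needs no API key: OLLAMA_BASE_URL defaults to http://localhost:11434
# and OLLAMA_MODEL defaults to mistral-small3.1.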
@@ -4,29 +4,39 @@ LLM inference engine for todo.sh agent.
 
 import os
 import time
-from typing import Any, Dict, List, Optional
+from typing import Any, Dict, Optional
 
 try:
+    from todo_agent.core.conversation_manager import ConversationManager, MessageRole
     from todo_agent.infrastructure.config import Config
     from todo_agent.infrastructure.llm_client_factory import LLMClientFactory
     from todo_agent.infrastructure.logger import Logger
-    from todo_agent.core.conversation_manager import ConversationManager, MessageRole
     from todo_agent.interface.tools import ToolCallHandler
 except ImportError:
-    from infrastructure.config import Config
-    from infrastructure.llm_client_factory import LLMClientFactory
-    from infrastructure.logger import Logger
-    from core.conversation_manager import ConversationManager, MessageRole
-    from interface.tools import ToolCallHandler
+    from core.conversation_manager import (  # type: ignore[no-redef]
+        ConversationManager,
+        MessageRole,
+    )
+    from infrastructure.config import Config  # type: ignore[no-redef]
+    from infrastructure.llm_client_factory import (  # type: ignore[no-redef]
+        LLMClientFactory,
+    )
+    from infrastructure.logger import Logger  # type: ignore[no-redef]
+    from interface.tools import ToolCallHandler  # type: ignore[no-redef]
 
 
 class Inference:
     """LLM inference engine that orchestrates tool calling and conversation management."""
 
-    def __init__(self, config: Config, tool_handler: ToolCallHandler, logger: Optional[Logger] = None):
+    def __init__(
+        self,
+        config: Config,
+        tool_handler: ToolCallHandler,
+        logger: Optional[Logger] = None,
+    ):
         """
         Initialize the inference engine.
-
+
         Args:
             config: Configuration object
             tool_handler: Tool call handler for executing tools
@@ -35,17 +45,19 @@
         self.config = config
         self.tool_handler = tool_handler
         self.logger = logger or Logger("inference")
-
+
         # Initialize LLM client using factory
         self.llm_client = LLMClientFactory.create_client(config, self.logger)
-
+
         # Initialize conversation manager
         self.conversation_manager = ConversationManager()
-
+
         # Set up system prompt
         self._setup_system_prompt()
-
-        self.logger.info(f"Inference engine initialized with {config.provider} provider using model: {self.llm_client.get_model_name()}")
+
+        self.logger.info(
+            f"Inference engine initialized with {config.provider} provider using model: {self.llm_client.get_model_name()}"
+        )
 
     def _setup_system_prompt(self) -> None:
         """Set up the system prompt for the LLM."""
@@ -57,75 +69,106 @@
         """Load and format the system prompt from file."""
         # Generate tools section programmatically
         tools_section = self._generate_tools_section()
-
+
         # Get current datetime for interpolation
         from datetime import datetime
+
         current_datetime = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-
+
         # Load system prompt from file
-        prompt_file_path = os.path.join(os.path.dirname(__file__), "prompts", "system_prompt.txt")
-
+        prompt_file_path = os.path.join(
+            os.path.dirname(__file__), "prompts", "system_prompt.txt"
+        )
+
         try:
-            with open(prompt_file_path, 'r', encoding='utf-8') as f:
+            with open(prompt_file_path, encoding="utf-8") as f:
                 system_prompt_template = f.read()
-
+
             # Format the template with the tools section and current datetime
             return system_prompt_template.format(
-                tools_section=tools_section,
-                current_datetime=current_datetime
+                tools_section=tools_section, current_datetime=current_datetime
             )
-
+
         except FileNotFoundError:
             self.logger.error(f"System prompt file not found: {prompt_file_path}")
             raise
         except Exception as e:
-            self.logger.error(f"Error loading system prompt: {str(e)}")
+            self.logger.error(f"Error loading system prompt: {e!s}")
             raise
 
     def _generate_tools_section(self) -> str:
         """Generate the AVAILABLE TOOLS section with strategic categorization."""
         tool_categories = {
-            "Discovery Tools": ["list_projects", "list_contexts", "list_tasks", "list_completed_tasks"],
-            "Modification Tools": ["add_task", "complete_task", "replace_task", "append_to_task", "prepend_to_task"],
-            "Management Tools": ["delete_task", "set_priority", "remove_priority", "move_task"],
-            "Maintenance Tools": ["archive_tasks", "deduplicate_tasks", "get_overview"]
+            "Discovery Tools": [
+                "list_projects",
+                "list_contexts",
+                "list_tasks",
+                "list_completed_tasks",
+            ],
+            "Modification Tools": [
+                "add_task",
+                "complete_task",
+                "replace_task",
+                "append_to_task",
+                "prepend_to_task",
+            ],
+            "Management Tools": [
+                "delete_task",
+                "set_priority",
+                "remove_priority",
+                "move_task",
+            ],
+            "Maintenance Tools": ["archive_tasks", "deduplicate_tasks", "get_overview"],
         }
-
+
         tools_section = []
         for category, tool_names in tool_categories.items():
             tools_section.append(f"\n**{category}:**")
             for tool_name in tool_names:
-                tool_info = next((t for t in self.tool_handler.tools if t["function"]["name"] == tool_name), None)
+                tool_info = next(
+                    (
+                        t
+                        for t in self.tool_handler.tools
+                        if t["function"]["name"] == tool_name
+                    ),
+                    None,
+                )
                 if tool_info:
                     # Get first sentence of description for concise overview
-                    first_sentence = tool_info["function"]["description"].split('.')[0] + '.'
+                    first_sentence = (
+                        tool_info["function"]["description"].split(".")[0] + "."
+                    )
                     tools_section.append(f"- {tool_name}(): {first_sentence}")
-
-        return '\n'.join(tools_section)
+
+        return "\n".join(tools_section)
 
     def process_request(self, user_input: str) -> tuple[str, float]:
         """
         Process a user request through the LLM with tool orchestration.
-
+
         Args:
             user_input: Natural language user request
-
+
         Returns:
             Tuple of (formatted response for user, thinking time in seconds)
         """
         # Start timing the request
         start_time = time.time()
-
+
         try:
-            self.logger.debug(f"Starting request processing for: {user_input[:30]}{'...' if len(user_input) > 30 else ''}")
-
+            self.logger.debug(
+                f"Starting request processing for: {user_input[:30]}{'...' if len(user_input) > 30 else ''}"
+            )
+
             # Add user message to conversation
             self.conversation_manager.add_message(MessageRole.USER, user_input)
             self.logger.debug("Added user message to conversation")
 
             # Get conversation history for LLM
             messages = self.conversation_manager.get_messages()
-            self.logger.debug(f"Retrieved {len(messages)} messages from conversation history")
+            self.logger.debug(
+                f"Retrieved {len(messages)} messages from conversation history"
+            )
 
             # Send to LLM with function calling enabled
             self.logger.debug("Sending request to LLM with tools")
@@ -142,26 +185,32 @@
                     break
 
                 tool_call_count += 1
-                self.logger.debug(f"Executing tool call sequence #{tool_call_count} with {len(tool_calls)} tools")
+                self.logger.debug(
+                    f"Executing tool call sequence #{tool_call_count} with {len(tool_calls)} tools"
+                )
 
                 # Execute all tool calls and collect results
                 tool_results = []
                 for i, tool_call in enumerate(tool_calls):
                     tool_name = tool_call.get("function", {}).get("name", "unknown")
                     tool_call_id = tool_call.get("id", "unknown")
-                    self.logger.debug(f"=== TOOL EXECUTION #{i+1}/{len(tool_calls)} ===")
+                    self.logger.debug(
+                        f"=== TOOL EXECUTION #{i + 1}/{len(tool_calls)} ==="
+                    )
                     self.logger.debug(f"Tool: {tool_name}")
                     self.logger.debug(f"Tool Call ID: {tool_call_id}")
                     self.logger.debug(f"Raw tool call: {tool_call}")
-
+
                     result = self.tool_handler.execute_tool(tool_call)
-
+
                     # Log tool execution result (success or error)
                     if result.get("error", False):
-                        self.logger.warning(f"Tool {tool_name} failed: {result.get('user_message', result.get('output', 'Unknown error'))}")
+                        self.logger.warning(
+                            f"Tool {tool_name} failed: {result.get('user_message', result.get('output', 'Unknown error'))}"
+                        )
                     else:
                         self.logger.debug(f"Tool {tool_name} succeeded")
-
+
                     self.logger.debug(f"Tool result: {result}")
                     tool_results.append(result)
 
@@ -180,12 +229,16 @@
             # Calculate and log total thinking time
             end_time = time.time()
             thinking_time = end_time - start_time
-
+
             # Add final assistant response to conversation with thinking time
             final_content = self.llm_client.extract_content(response)
-            self.conversation_manager.add_message(MessageRole.ASSISTANT, final_content, thinking_time=thinking_time)
-
-            self.logger.info(f"Request completed successfully with {tool_call_count} tool call sequences in {thinking_time:.2f}s")
+            self.conversation_manager.add_message(
+                MessageRole.ASSISTANT, final_content, thinking_time=thinking_time
+            )
+
+            self.logger.info(
+                f"Request completed successfully with {tool_call_count} tool call sequences in {thinking_time:.2f}s"
+            )
 
             # Return final user-facing response and thinking time
             return final_content, thinking_time
@@ -194,13 +247,15 @@
             # Calculate and log thinking time even for failed requests
             end_time = time.time()
             thinking_time = end_time - start_time
-            self.logger.error(f"Error processing request after {thinking_time:.2f}s: {str(e)}")
-            return f"Error: {str(e)}", thinking_time
+            self.logger.error(
+                f"Error processing request after {thinking_time:.2f}s: {e!s}"
+            )
+            return f"Error: {e!s}", thinking_time
 
-    def get_conversation_summary(self) -> Dict[str, any]:
+    def get_conversation_summary(self) -> Dict[str, Any]:
         """
         Get conversation statistics and summary.
-
+
         Returns:
             Dictionary with conversation metrics
         """
@@ -214,7 +269,7 @@
     def get_conversation_manager(self) -> ConversationManager:
         """
         Get the conversation manager instance.
-
+
         Returns:
             Conversation manager instance
         """