todo-agent 0.3.1__py3-none-any.whl → 0.3.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- todo_agent/_version.py +2 -2
- todo_agent/core/conversation_manager.py +1 -1
- todo_agent/core/exceptions.py +54 -3
- todo_agent/core/todo_manager.py +127 -56
- todo_agent/infrastructure/calendar_utils.py +2 -4
- todo_agent/infrastructure/inference.py +158 -52
- todo_agent/infrastructure/llm_client.py +258 -1
- todo_agent/infrastructure/ollama_client.py +77 -76
- todo_agent/infrastructure/openrouter_client.py +77 -72
- todo_agent/infrastructure/prompts/system_prompt.txt +88 -396
- todo_agent/infrastructure/todo_shell.py +37 -27
- todo_agent/interface/cli.py +129 -19
- todo_agent/interface/formatters.py +25 -0
- todo_agent/interface/progress.py +69 -0
- todo_agent/interface/tools.py +142 -23
- {todo_agent-0.3.1.dist-info → todo_agent-0.3.3.dist-info}/METADATA +3 -3
- todo_agent-0.3.3.dist-info/RECORD +30 -0
- todo_agent-0.3.1.dist-info/RECORD +0 -29
- {todo_agent-0.3.1.dist-info → todo_agent-0.3.3.dist-info}/WHEEL +0 -0
- {todo_agent-0.3.1.dist-info → todo_agent-0.3.3.dist-info}/entry_points.txt +0 -0
- {todo_agent-0.3.1.dist-info → todo_agent-0.3.3.dist-info}/licenses/LICENSE +0 -0
- {todo_agent-0.3.1.dist-info → todo_agent-0.3.3.dist-info}/top_level.txt +0 -0
todo_agent/infrastructure/openrouter_client.py

@@ -2,67 +2,58 @@
 LLM client for OpenRouter API communication.
 """
 
-import json
-import time
 from typing import Any, Dict, List
 
-import
-
-try:
-    from todo_agent.infrastructure.config import Config
-    from todo_agent.infrastructure.llm_client import LLMClient
-    from todo_agent.infrastructure.logger import Logger
-    from todo_agent.infrastructure.token_counter import get_token_counter
-except ImportError:
-    from infrastructure.config import Config  # type: ignore[no-redef]
-    from infrastructure.llm_client import LLMClient  # type: ignore[no-redef]
-    from infrastructure.logger import Logger  # type: ignore[no-redef]
-    from infrastructure.token_counter import get_token_counter  # type: ignore[no-redef]
+from todo_agent.infrastructure.llm_client import LLMClient
 
 
 class OpenRouterClient(LLMClient):
     """LLM API communication and response handling."""
 
-    def __init__(self, config:
-        self.config = config
-        self.api_key = config.openrouter_api_key
-        self.model = config.model
-        self.base_url = "https://openrouter.ai/api/v1"
-        self.logger = Logger("openrouter_client")
-        self.token_counter = get_token_counter(self.model)
-
-    def _estimate_tokens(self, text: str) -> int:
+    def __init__(self, config: Any) -> None:
         """
-
+        Initialize OpenRouter client.
 
         Args:
-
-
-        Returns:
-            Number of tokens
+            config: Configuration object
         """
-
+        super().__init__(config, config.model, "openrouter_client")
+        self.api_key = config.openrouter_api_key
+        self.base_url = "https://openrouter.ai/api/v1"
 
-    def
-        """
-
-
-
+    def _get_request_headers(self) -> Dict[str, str]:
+        """Get request headers for OpenRouter API."""
+        return {
+            "Authorization": f"Bearer {self.api_key}",
+            "Content-Type": "application/json",
+        }
 
-
+    def _get_request_payload(
+        self, messages: List[Dict[str, str]], tools: List[Dict[str, Any]]
+    ) -> Dict[str, Any]:
+        """Get request payload for OpenRouter API."""
+        return {
+            "model": self.model,
+            "messages": messages,
+            "tools": tools,
+            "tool_choice": "auto",
+        }
 
-
-
+    def _get_api_endpoint(self) -> str:
+        """Get OpenRouter API endpoint."""
+        return f"{self.base_url}/chat/completions"
 
-    def
-        self,
+    def _process_response(
+        self, response_data: Dict[str, Any], start_time: float
     ) -> None:
-        """
+        """Process and log OpenRouter response details."""
+        import time
+
         end_time = time.time()
         latency_ms = (end_time - start_time) * 1000
 
         # Extract token usage from response if available
-        usage =
+        usage = response_data.get("usage", {})
         prompt_tokens = usage.get("prompt_tokens", "unknown")
         completion_tokens = usage.get("completion_tokens", "unknown")
         total_tokens = usage.get("total_tokens", "unknown")
@@ -73,7 +64,7 @@ class OpenRouterClient(LLMClient):
         )
 
         # Extract and log choice details
-        choices =
+        choices = response_data.get("choices", [])
         if not choices:
             return
 
@@ -99,7 +90,7 @@ class OpenRouterClient(LLMClient):
             tool_name = tool_call.get("function", {}).get("name", "unknown")
             self.logger.info(f" Tool call {i}: {tool_name}")
 
-        self.logger.debug(f"Raw response: {
+        self.logger.debug(f"Raw response: {response_data}")
 
     def chat_with_tools(
         self, messages: List[Dict[str, str]], tools: List[Dict[str, Any]]
@@ -114,33 +105,7 @@ class OpenRouterClient(LLMClient):
         Returns:
            API response dictionary
         """
-
-            "Authorization": f"Bearer {self.api_key}",
-            "Content-Type": "application/json",
-        }
-
-        payload = {
-            "model": self.model,
-            "messages": messages,
-            "tools": tools,
-            "tool_choice": "auto",
-        }
-
-        start_time = time.time()
-        self._log_request_details(payload, start_time)
-
-        response = requests.post( # nosec B113
-            f"{self.base_url}/chat/completions", headers=headers, json=payload
-        )
-
-        if response.status_code != 200:
-            self.logger.error(f"OpenRouter API error: {response.text}")
-            raise Exception(f"OpenRouter API error: {response.text}")
-
-        response_data: Dict[str, Any] = response.json()
-        self._log_response_details(response_data, start_time)
-
-        return response_data
+        return self._make_http_request(messages, tools)
 
     def continue_with_tool_result(self, tool_result: Dict[str, Any]) -> Dict[str, Any]:
         """
@@ -157,13 +122,26 @@ class OpenRouterClient(LLMClient):
 
     def extract_tool_calls(self, response: Dict[str, Any]) -> List[Dict[str, Any]]:
         """Extract tool calls from API response."""
+        # Check for provider errors first
+        if response.get("error", False):
+            self.logger.warning(
+                f"Cannot extract tool calls from error response: {response.get('error_type')}"
+            )
+            return []
+
         tool_calls = []
         if response.get("choices"):
             choice = response["choices"][0]
             if "message" in choice and "tool_calls" in choice["message"]:
-
+                raw_tool_calls = choice["message"]["tool_calls"]
+
+                # Validate each tool call using common validation
+                for i, tool_call in enumerate(raw_tool_calls):
+                    if self._validate_tool_call(tool_call, i):
+                        tool_calls.append(tool_call)
+
         self.logger.debug(
-            f"Extracted {len(tool_calls)} tool calls from
+            f"Extracted {len(tool_calls)} valid tool calls from {len(raw_tool_calls)} total"
         )
         for i, tool_call in enumerate(tool_calls):
             tool_name = tool_call.get("function", {}).get("name", "unknown")
@@ -179,6 +157,13 @@ class OpenRouterClient(LLMClient):
 
     def extract_content(self, response: Dict[str, Any]) -> str:
         """Extract content from API response."""
+        # Check for provider errors first
+        if response.get("error", False):
+            self.logger.warning(
+                f"Cannot extract content from error response: {response.get('error_type')}"
+            )
+            return ""
+
         if response.get("choices"):
             choice = response["choices"][0]
             if "message" in choice and "content" in choice["message"]:
@@ -194,3 +179,23 @@ class OpenRouterClient(LLMClient):
             Model name string
         """
         return self.model
+
+    def get_provider_name(self) -> str:
+        """
+        Get the provider name for this client.
+
+        Returns:
+            Provider name string
+        """
+        return "openrouter"
+
+    def get_request_timeout(self) -> int:
+        """
+        Get the request timeout in seconds for OpenRouter.
+
+        Cloud APIs typically respond quickly, so we use a 30-second timeout.
+
+        Returns:
+            Timeout value in seconds (30)
+        """
+        return 30