todo-agent 0.2.9__py3-none-any.whl → 0.3.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -2,67 +2,54 @@
  LLM client for OpenRouter API communication.
  """

- import json
- import time
  from typing import Any, Dict, List

- import requests
-
- try:
-     from todo_agent.infrastructure.config import Config
-     from todo_agent.infrastructure.llm_client import LLMClient
-     from todo_agent.infrastructure.logger import Logger
-     from todo_agent.infrastructure.token_counter import get_token_counter
- except ImportError:
-     from infrastructure.config import Config  # type: ignore[no-redef]
-     from infrastructure.llm_client import LLMClient  # type: ignore[no-redef]
-     from infrastructure.logger import Logger  # type: ignore[no-redef]
-     from infrastructure.token_counter import get_token_counter  # type: ignore[no-redef]
+ from todo_agent.infrastructure.llm_client import LLMClient


  class OpenRouterClient(LLMClient):
      """LLM API communication and response handling."""

-     def __init__(self, config: Config):
-         self.config = config
-         self.api_key = config.openrouter_api_key
-         self.model = config.model
-         self.base_url = "https://openrouter.ai/api/v1"
-         self.logger = Logger("openrouter_client")
-         self.token_counter = get_token_counter(self.model)
-
-     def _estimate_tokens(self, text: str) -> int:
+     def __init__(self, config):
          """
-         Estimate token count for text using accurate tokenization.
+         Initialize OpenRouter client.

          Args:
-             text: Text to count tokens for
-
-         Returns:
-             Number of tokens
+             config: Configuration object
          """
-         return self.token_counter.count_tokens(text)
+         super().__init__(config, config.model, "openrouter_client")
+         self.api_key = config.openrouter_api_key
+         self.base_url = "https://openrouter.ai/api/v1"

-     def _log_request_details(self, payload: Dict[str, Any], start_time: float) -> None:
-         """Log request details including accurate token count."""
-         # Count tokens for messages
-         messages = payload.get("messages", [])
-         tools = payload.get("tools", [])
+     def _get_request_headers(self) -> Dict[str, str]:
+         """Get request headers for OpenRouter API."""
+         return {
+             "Authorization": f"Bearer {self.api_key}",
+             "Content-Type": "application/json",
+         }

-         total_tokens = self.token_counter.count_request_tokens(messages, tools)
+     def _get_request_payload(self, messages: List[Dict[str, str]], tools: List[Dict[str, Any]]) -> Dict[str, Any]:
+         """Get request payload for OpenRouter API."""
+         return {
+             "model": self.model,
+             "messages": messages,
+             "tools": tools,
+             "tool_choice": "auto",
+         }

-         self.logger.info(f"Request sent - Token count: {total_tokens}")
-         # self.logger.debug(f"Raw request payload: {json.dumps(payload, indent=2)}")
+     def _get_api_endpoint(self) -> str:
+         """Get OpenRouter API endpoint."""
+         return f"{self.base_url}/chat/completions"

-     def _log_response_details(
-         self, response: Dict[str, Any], start_time: float
-     ) -> None:
-         """Log response details including token count and latency."""
+     def _process_response(self, response_data: Dict[str, Any], start_time: float) -> None:
+         """Process and log OpenRouter response details."""
+         import time
+
          end_time = time.time()
          latency_ms = (end_time - start_time) * 1000

          # Extract token usage from response if available
-         usage = response.get("usage", {})
+         usage = response_data.get("usage", {})
          prompt_tokens = usage.get("prompt_tokens", "unknown")
          completion_tokens = usage.get("completion_tokens", "unknown")
          total_tokens = usage.get("total_tokens", "unknown")
@@ -73,33 +60,33 @@ class OpenRouterClient(LLMClient):
          )

          # Extract and log choice details
-         choices = response.get("choices", [])
+         choices = response_data.get("choices", [])
          if not choices:
              return
-
+
          choice = choices[0]
          message = choice.get("message", {})
-
+
          # Always log reasoning and content if present
          reasoning = message.get("reasoning", "")
          if reasoning:
              self.logger.info(f"LLM reasoning: {reasoning}")
-
+
          content = message.get("content", "")
          if content:
              self.logger.info(f"LLM content: {content}")
-
+
          # Handle tool calls
          tool_calls = message.get("tool_calls", [])
          if tool_calls:
              self.logger.info(f"Response contains {len(tool_calls)} tool calls")
-
+
              # Log each tool call
              for i, tool_call in enumerate(tool_calls, 1):
                  tool_name = tool_call.get("function", {}).get("name", "unknown")
                  self.logger.info(f" Tool call {i}: {tool_name}")

-         self.logger.debug(f"Raw response: {json.dumps(response, indent=2)}")
+         self.logger.debug(f"Raw response: {response_data}")

      def chat_with_tools(
          self, messages: List[Dict[str, str]], tools: List[Dict[str, Any]]
@@ -114,33 +101,7 @@ class OpenRouterClient(LLMClient):
          Returns:
              API response dictionary
          """
-         headers = {
-             "Authorization": f"Bearer {self.api_key}",
-             "Content-Type": "application/json",
-         }
-
-         payload = {
-             "model": self.model,
-             "messages": messages,
-             "tools": tools,
-             "tool_choice": "auto",
-         }
-
-         start_time = time.time()
-         self._log_request_details(payload, start_time)
-
-         response = requests.post(  # nosec B113
-             f"{self.base_url}/chat/completions", headers=headers, json=payload
-         )
-
-         if response.status_code != 200:
-             self.logger.error(f"OpenRouter API error: {response.text}")
-             raise Exception(f"OpenRouter API error: {response.text}")
-
-         response_data: Dict[str, Any] = response.json()
-         self._log_response_details(response_data, start_time)
-
-         return response_data
+         return self._make_http_request(messages, tools)

      def continue_with_tool_result(self, tool_result: Dict[str, Any]) -> Dict[str, Any]:
          """
@@ -157,13 +118,24 @@ class OpenRouterClient(LLMClient):

      def extract_tool_calls(self, response: Dict[str, Any]) -> List[Dict[str, Any]]:
          """Extract tool calls from API response."""
+         # Check for provider errors first
+         if response.get("error", False):
+             self.logger.warning(f"Cannot extract tool calls from error response: {response.get('error_type')}")
+             return []
+
          tool_calls = []
          if response.get("choices"):
              choice = response["choices"][0]
              if "message" in choice and "tool_calls" in choice["message"]:
-                 tool_calls = choice["message"]["tool_calls"]
+                 raw_tool_calls = choice["message"]["tool_calls"]
+
+                 # Validate each tool call using common validation
+                 for i, tool_call in enumerate(raw_tool_calls):
+                     if self._validate_tool_call(tool_call, i):
+                         tool_calls.append(tool_call)
+
                  self.logger.debug(
-                     f"Extracted {len(tool_calls)} tool calls from response"
+                     f"Extracted {len(tool_calls)} valid tool calls from {len(raw_tool_calls)} total"
                  )
                  for i, tool_call in enumerate(tool_calls):
                      tool_name = tool_call.get("function", {}).get("name", "unknown")
@@ -179,6 +151,11 @@ class OpenRouterClient(LLMClient):

      def extract_content(self, response: Dict[str, Any]) -> str:
          """Extract content from API response."""
+         # Check for provider errors first
+         if response.get("error", False):
+             self.logger.warning(f"Cannot extract content from error response: {response.get('error_type')}")
+             return ""
+
          if response.get("choices"):
              choice = response["choices"][0]
              if "message" in choice and "content" in choice["message"]:
@@ -194,3 +171,23 @@ class OpenRouterClient(LLMClient):
              Model name string
          """
          return self.model
+
+     def get_provider_name(self) -> str:
+         """
+         Get the provider name for this client.
+
+         Returns:
+             Provider name string
+         """
+         return "openrouter"
+
+     def get_request_timeout(self) -> int:
+         """
+         Get the request timeout in seconds for OpenRouter.
+
+         Cloud APIs typically respond quickly, so we use a 30-second timeout.
+
+         Returns:
+             Timeout value in seconds (30)
+         """
+         return 30
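
Note on the refactor: the 0.3.2 client delegates HTTP transport, request construction, and tool-call validation to the shared todo_agent.infrastructure.llm_client.LLMClient base class, which is not included in this diff. The sketch below only illustrates the template-method shape implied by the subclass above; apart from the hook names actually referenced in the diff (_get_request_headers, _get_request_payload, _get_api_endpoint, _process_response, _validate_tool_call, _make_http_request, get_request_timeout), every detail is an assumption, not the package's real implementation.

# Illustrative sketch only -- the real LLMClient base class is not part of this
# diff; names and behavior beyond the hooks referenced above are assumptions.
import time
from abc import ABC, abstractmethod
from typing import Any, Dict, List

import requests


class LLMClient(ABC):
    """Assumed template-method base for provider-specific clients."""

    def __init__(self, config: Any, model: str, logger_name: str) -> None:
        self.config = config
        self.model = model
        self.logger_name = logger_name  # the real class presumably builds a Logger here

    @abstractmethod
    def _get_request_headers(self) -> Dict[str, str]: ...

    @abstractmethod
    def _get_request_payload(
        self, messages: List[Dict[str, str]], tools: List[Dict[str, Any]]
    ) -> Dict[str, Any]: ...

    @abstractmethod
    def _get_api_endpoint(self) -> str: ...

    @abstractmethod
    def _process_response(self, response_data: Dict[str, Any], start_time: float) -> None: ...

    def get_request_timeout(self) -> int:
        # Default timeout; OpenRouterClient overrides this with 30 seconds.
        return 60

    def _validate_tool_call(self, tool_call: Dict[str, Any], index: int) -> bool:
        # Minimal stand-in: accept only tool calls that name a function.
        return bool(tool_call.get("function", {}).get("name"))

    def _make_http_request(
        self, messages: List[Dict[str, str]], tools: List[Dict[str, Any]]
    ) -> Dict[str, Any]:
        # Shared transport parameterized by the provider-specific hooks above.
        start_time = time.time()
        response = requests.post(
            self._get_api_endpoint(),
            headers=self._get_request_headers(),
            json=self._get_request_payload(messages, tools),
            timeout=self.get_request_timeout(),
        )
        if response.status_code != 200:
            raise RuntimeError(f"LLM API error: {response.text}")
        response_data: Dict[str, Any] = response.json()
        self._process_response(response_data, start_time)
        return response_data

Compared with 0.2.9, which inlined the headers, payload, and an untimed requests.post (hence the # nosec B113 marker), this shape centralizes the transport and lets each provider declare its own timeout via get_request_timeout().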