todo-agent 0.3.1__py3-none-any.whl → 0.3.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -2,12 +2,33 @@
 Abstract LLM client interface for todo.sh agent.
 """
 
+import json
+import time
 from abc import ABC, abstractmethod
 from typing import Any, Dict, List
 
+import requests
+
+from todo_agent.infrastructure.logger import Logger
+from todo_agent.infrastructure.token_counter import get_token_counter
+
 
 class LLMClient(ABC):
-    """Abstract interface for LLM clients."""
+    """Abstract interface for LLM clients with common functionality."""
+
+    def __init__(self, config: Any, model: str, logger_name: str = "llm_client"):
+        """
+        Initialize common LLM client functionality.
+
+        Args:
+            config: Configuration object
+            model: Model name for token counting
+            logger_name: Logger name for this client
+        """
+        self.config = config
+        self.model = model
+        self.logger = Logger(logger_name)
+        self.token_counter = get_token_counter(model)
 
     @abstractmethod
     def chat_with_tools(
@@ -60,3 +81,205 @@ class LLMClient(ABC):
             Model name string
         """
         pass
+
+    @abstractmethod
+    def _get_request_headers(self) -> Dict[str, str]:
+        """
+        Get request headers for the API call.
+
+        Returns:
+            Dictionary of headers
+        """
+        pass
+
+    @abstractmethod
+    def _get_request_payload(self, messages: List[Dict[str, str]], tools: List[Dict[str, Any]]) -> Dict[str, Any]:
+        """
+        Get request payload for the API call.
+
+        Args:
+            messages: List of message dictionaries
+            tools: List of tool definitions
+
+        Returns:
+            Request payload dictionary
+        """
+        pass
+
+    @abstractmethod
+    def _get_api_endpoint(self) -> str:
+        """
+        Get the API endpoint for requests.
+
+        Returns:
+            API endpoint URL
+        """
+        pass
+
+    @abstractmethod
+    def _process_response(self, response_data: Dict[str, Any], start_time: float) -> None:
+        """
+        Process and log response details.
+
+        Args:
+            response_data: Response data from API
+            start_time: Request start time for latency calculation
+        """
+        pass
+
+    def _log_request_details(self, payload: Dict[str, Any], start_time: float) -> None:
+        """Log request details including accurate token count."""
+        messages = payload.get("messages", [])
+        tools = payload.get("tools", [])
+
+        total_tokens = self.token_counter.count_request_tokens(messages, tools)
+        self.logger.info(f"Request sent - Token count: {total_tokens}")
+
+    def _make_http_request(self, messages: List[Dict[str, str]], tools: List[Dict[str, Any]]) -> Dict[str, Any]:
+        """
+        Make HTTP request to the LLM API with common error handling.
+
+        Args:
+            messages: List of message dictionaries
+            tools: List of tool definitions
+
+        Returns:
+            API response dictionary
+        """
+        headers = self._get_request_headers()
+        payload = self._get_request_payload(messages, tools)
+        endpoint = self._get_api_endpoint()
+
+        start_time = time.time()
+        self._log_request_details(payload, start_time)
+
+        try:
+            response = requests.post(  # nosec B113
+                endpoint, headers=headers, json=payload, timeout=self.get_request_timeout()
+            )
+        except requests.exceptions.Timeout:
+            self.logger.error(f"{self.get_provider_name()} API request timed out")
+            return self._create_error_response("timeout", "Request timed out")
+        except requests.exceptions.ConnectionError as e:
+            self.logger.error(f"{self.get_provider_name()} API connection error: {e}")
+            return self._create_error_response("timeout", f"Connection error: {e}")
+        except requests.exceptions.RequestException as e:
+            self.logger.error(f"{self.get_provider_name()} API request error: {e}")
+            return self._create_error_response("general_error", f"Request error: {e}")
+
+        if response.status_code != 200:
+            self.logger.error(f"{self.get_provider_name()} API error: {response.text}")
+            error_type = self.classify_error(Exception(response.text), self.get_provider_name())
+            return self._create_error_response(error_type, response.text, response.status_code)
+
+        try:
+            response_data: Dict[str, Any] = response.json()
+        except Exception as e:
+            self.logger.error(f"Failed to parse {self.get_provider_name()} response JSON: {e}")
+            return self._create_error_response("malformed_response", f"JSON parsing failed: {e}", response.status_code)
+
+        self._process_response(response_data, start_time)
+        return response_data
+
+    def _create_error_response(self, error_type: str, raw_error: str, status_code: int = 0) -> Dict[str, Any]:
+        """
+        Create standardized error response.
+
+        Args:
+            error_type: Type of error
+            raw_error: Raw error message
+            status_code: HTTP status code if available
+
+        Returns:
+            Standardized error response dictionary
+        """
+        return {
+            "error": True,
+            "error_type": error_type,
+            "provider": self.get_provider_name(),
+            "status_code": status_code,
+            "raw_error": raw_error
+        }
+
+    def _validate_tool_call(self, tool_call: Any, index: int) -> bool:
+        """
+        Validate a tool call structure.
+
+        Args:
+            tool_call: Tool call to validate
+            index: Index of the tool call for logging
+
+        Returns:
+            True if valid, False otherwise
+        """
+        try:
+            if not isinstance(tool_call, dict):
+                self.logger.warning(f"Tool call {index+1} is not a dictionary: {tool_call}")
+                return False
+
+            function = tool_call.get("function", {})
+            if not isinstance(function, dict):
+                self.logger.warning(f"Tool call {index+1} function is not a dictionary: {function}")
+                return False
+
+            tool_name = function.get("name")
+            if not tool_name:
+                self.logger.warning(f"Tool call {index+1} missing function name: {tool_call}")
+                return False
+
+            arguments = function.get("arguments", "{}")
+            if arguments and not isinstance(arguments, str):
+                self.logger.warning(f"Tool call {index+1} arguments not a string: {arguments}")
+                return False
+
+            return True
+        except Exception as e:
+            self.logger.warning(f"Error validating tool call {index+1}: {e}")
+            return False
+
+    def classify_error(self, error: Exception, provider: str) -> str:
+        """
+        Classify provider errors using simple string matching.
+
+        Args:
+            error: The exception that occurred
+            provider: The provider name (e.g., 'openrouter', 'ollama')
+
+        Returns:
+            Error type string for message lookup
+        """
+        error_str = str(error).lower()
+
+        if "malformed" in error_str or "invalid" in error_str or "parse" in error_str:
+            return "malformed_response"
+        elif "rate limit" in error_str or "429" in error_str or "too many requests" in error_str:
+            return "rate_limit"
+        elif "unauthorized" in error_str or "401" in error_str or "authentication" in error_str:
+            return "auth_error"
+        elif "timeout" in error_str or "timed out" in error_str:
+            return "timeout"
+        elif "connection" in error_str or "network" in error_str or "dns" in error_str:
+            return "timeout"  # Treat connection issues as timeouts for user messaging
+        elif "refused" in error_str or "unreachable" in error_str:
+            return "timeout"  # Connection refused is similar to timeout for users
+        else:
+            return "general_error"
+
+    @abstractmethod
+    def get_provider_name(self) -> str:
+        """
+        Get the provider name for this client.
+
+        Returns:
+            Provider name string
+        """
+        pass
+
+    def get_request_timeout(self) -> int:
+        """
+        Get the request timeout in seconds for this provider.
+
+        Returns:
+            Timeout value in seconds (default: 30)
+        """
+        return 30
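
The net effect of this hunk is a template-method base class: HTTP transport, timeouts, error classification, standardized error payloads, and tool-call validation now live in LLMClient, while each provider supplies only its headers, payload, endpoint, and response logging. A minimal sketch of that contract for a hypothetical new provider follows; the class name, model name, and endpoint are illustrative and not part of the package, and the remaining abstract methods (chat_with_tools, extract_tool_calls, extract_content, get_model_name) are elided for brevity:

    # Hypothetical subclass; only the hook signatures come from the diff above.
    from typing import Any, Dict, List

    from todo_agent.infrastructure.llm_client import LLMClient


    class ExampleClient(LLMClient):
        def __init__(self, config: Any):
            super().__init__(config, "example-model", "example_client")

        def _get_request_headers(self) -> Dict[str, str]:
            return {"Content-Type": "application/json"}

        def _get_request_payload(
            self, messages: List[Dict[str, str]], tools: List[Dict[str, Any]]
        ) -> Dict[str, Any]:
            return {"model": self.model, "messages": messages, "tools": tools}

        def _get_api_endpoint(self) -> str:
            return "https://api.example.invalid/chat"  # placeholder endpoint

        def _process_response(self, response_data: Dict[str, Any], start_time: float) -> None:
            self.logger.info("Response received")  # provider-specific logging hook

        def get_provider_name(self) -> str:
            return "example"

Because _make_http_request returns an error dictionary instead of raising, a subclass's chat_with_tools can simply delegate to it, as both shipped clients now do. The next diff covers the Ollama client.
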
@@ -2,92 +2,72 @@
 LLM client for Ollama API communication.
 """
 
-import json
-import time
 from typing import Any, Dict, List
 
-import requests
-
-try:
-    from todo_agent.infrastructure.config import Config
-    from todo_agent.infrastructure.llm_client import LLMClient
-    from todo_agent.infrastructure.logger import Logger
-    from todo_agent.infrastructure.token_counter import get_token_counter
-except ImportError:
-    from infrastructure.config import Config  # type: ignore[no-redef]
-    from infrastructure.llm_client import LLMClient  # type: ignore[no-redef]
-    from infrastructure.logger import Logger  # type: ignore[no-redef]
-    from infrastructure.token_counter import get_token_counter  # type: ignore[no-redef]
+from todo_agent.infrastructure.llm_client import LLMClient
 
 
 class OllamaClient(LLMClient):
     """Ollama API client implementation."""
 
-    def __init__(self, config: Config):
+    def __init__(self, config):
         """
         Initialize Ollama client.
 
         Args:
             config: Configuration object
         """
-        self.config = config
+        super().__init__(config, config.ollama_model, "ollama_client")
         self.base_url = config.ollama_base_url
-        self.model = config.ollama_model
-        self.logger = Logger("ollama_client")
-        self.token_counter = get_token_counter(self.model)
-
-    def _estimate_tokens(self, text: str) -> int:
-        """
-        Estimate token count for text using accurate tokenization.
-
-        Args:
-            text: Text to count tokens for
 
-        Returns:
-            Number of tokens
-        """
-        return self.token_counter.count_tokens(text)
-
-    def _log_request_details(self, payload: Dict[str, Any], start_time: float) -> None:
-        """Log request details including accurate token count."""
-        # Count tokens for messages
-        messages = payload.get("messages", [])
-        tools = payload.get("tools", [])
+    def _get_request_headers(self) -> Dict[str, str]:
+        """Get request headers for Ollama API."""
+        return {
+            "Content-Type": "application/json",
+        }
 
-        total_tokens = self.token_counter.count_request_tokens(messages, tools)
+    def _get_request_payload(self, messages: List[Dict[str, str]], tools: List[Dict[str, Any]]) -> Dict[str, Any]:
+        """Get request payload for Ollama API."""
+        return {
+            "model": self.model,
+            "messages": messages,
+            "tools": tools,
+            "stream": False,
+        }
 
-        self.logger.info(f"Request sent - Token count: {total_tokens}")
-        # self.logger.debug(f"Raw request payload: {json.dumps(payload, indent=2)}")
+    def _get_api_endpoint(self) -> str:
+        """Get Ollama API endpoint."""
+        return f"{self.base_url}/api/chat"
 
-    def _log_response_details(
-        self, response: Dict[str, Any], start_time: float
-    ) -> None:
-        """Log response details including latency."""
+    def _process_response(self, response_data: Dict[str, Any], start_time: float) -> None:
+        """Process and log Ollama response details."""
+        import time
+
         end_time = time.time()
         latency_ms = (end_time - start_time) * 1000
 
         self.logger.info(f"Response received - Latency: {latency_ms:.2f}ms")
 
         # Log tool call details if present
-        if "message" in response and "tool_calls" in response["message"]:
-            tool_calls = response["message"]["tool_calls"]
+        if "message" in response_data and "tool_calls" in response_data["message"]:
+            tool_calls = response_data["message"]["tool_calls"]
             self.logger.info(f"Response contains {len(tool_calls)} tool calls")
 
             # Log thinking content (response body) if present
-            content = response["message"].get("content", "")
+            content = response_data["message"].get("content", "")
             if content and content.strip():
                 self.logger.info(f"LLM thinking before tool calls: {content}")
 
             for i, tool_call in enumerate(tool_calls):
                 tool_name = tool_call.get("function", {}).get("name", "unknown")
                 self.logger.info(f"  Tool call {i + 1}: {tool_name}")
-        elif "message" in response and "content" in response["message"]:
-            content = response["message"]["content"]
+        elif "message" in response_data and "content" in response_data["message"]:
+            content = response_data["message"]["content"]
             self.logger.debug(
                 f"Response contains content: {content[:100]}{'...' if len(content) > 100 else ''}"
             )
 
-        self.logger.debug(f"Raw response: {json.dumps(response, indent=2)}")
+        self.logger.debug(f"Raw response: {response_data}")
 
     def chat_with_tools(
         self, messages: List[Dict[str, str]], tools: List[Dict[str, Any]]
@@ -102,41 +82,27 @@ class OllamaClient(LLMClient):
         Returns:
             API response dictionary
         """
-        headers = {
-            "Content-Type": "application/json",
-        }
-
-        payload = {
-            "model": self.model,
-            "messages": messages,
-            "tools": tools,
-            "stream": False,
-        }
-
-        start_time = time.time()
-        self._log_request_details(payload, start_time)
-
-        response = requests.post(  # nosec B113
-            f"{self.base_url}/api/chat", headers=headers, json=payload
-        )
-
-        if response.status_code != 200:
-            self.logger.error(f"Ollama API error: {response.text}")
-            raise Exception(f"Ollama API error: {response.text}")
-
-        response_data: Dict[str, Any] = response.json()
-        self._log_response_details(response_data, start_time)
-
-        return response_data
+        return self._make_http_request(messages, tools)
 
     def extract_tool_calls(self, response: Dict[str, Any]) -> List[Dict[str, Any]]:
         """Extract tool calls from API response."""
+        # Check for provider errors first
+        if response.get("error", False):
+            self.logger.warning(f"Cannot extract tool calls from error response: {response.get('error_type')}")
+            return []
+
         tool_calls = []
 
        # Ollama response format is different from OpenRouter
         if "message" in response and "tool_calls" in response["message"]:
-            tool_calls = response["message"]["tool_calls"]
-            self.logger.debug(f"Extracted {len(tool_calls)} tool calls from response")
+            raw_tool_calls = response["message"]["tool_calls"]
+
+            # Validate each tool call using common validation
+            for i, tool_call in enumerate(raw_tool_calls):
+                if self._validate_tool_call(tool_call, i):
+                    tool_calls.append(tool_call)
+
+            self.logger.debug(f"Extracted {len(tool_calls)} valid tool calls from {len(raw_tool_calls)} total")
             for i, tool_call in enumerate(tool_calls):
                 tool_name = tool_call.get("function", {}).get("name", "unknown")
                 tool_call_id = tool_call.get("id", "unknown")
@@ -150,6 +116,11 @@ class OllamaClient(LLMClient):
 
     def extract_content(self, response: Dict[str, Any]) -> str:
         """Extract content from API response."""
+        # Check for provider errors first
+        if response.get("error", False):
+            self.logger.warning(f"Cannot extract content from error response: {response.get('error_type')}")
+            return ""
+
         if "message" in response and "content" in response["message"]:
             content = response["message"]["content"]
             return content if isinstance(content, str) else str(content)
@@ -163,3 +134,23 @@ class OllamaClient(LLMClient):
             Model name string
         """
         return self.model
+
+    def get_provider_name(self) -> str:
+        """
+        Get the provider name for this client.
+
+        Returns:
+            Provider name string
+        """
+        return "ollama"
+
+    def get_request_timeout(self) -> int:
+        """
+        Get the request timeout in seconds for Ollama.
+
+        Ollama can be slower than cloud providers, so we use a 2-minute timeout.
+
+        Returns:
+            Timeout value in seconds (120)
+        """
+        return 120
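
One behavioral consequence of this refactor: a failed Ollama request no longer raises `Exception(f"Ollama API error: ...")` but returns the standardized dictionary built by `_create_error_response`, and the extraction methods short-circuit on it. A rough caller-side sketch under that assumption; Config construction and the message contents are placeholders, and the OllamaClient import path is not shown in this diff:

    from todo_agent.infrastructure.config import Config  # path per the removed imports above

    config = Config()  # assumed to supply ollama_base_url and ollama_model
    client = OllamaClient(config)  # OllamaClient import path not shown in this diff

    response = client.chat_with_tools(
        messages=[{"role": "user", "content": "add task: buy milk"}],  # illustrative
        tools=[],
    )

    if response.get("error"):
        # Shape per _create_error_response, e.g.:
        # {"error": True, "error_type": "timeout", "provider": "ollama",
        #  "status_code": 0, "raw_error": "Request timed out"}
        print(f"{response['provider']} failed: {response['error_type']}")
    else:
        print(client.extract_content(response))

The OpenRouter client below gets the same treatment.
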
@@ -2,67 +2,54 @@
 LLM client for OpenRouter API communication.
 """
 
-import json
-import time
 from typing import Any, Dict, List
 
-import requests
-
-try:
-    from todo_agent.infrastructure.config import Config
-    from todo_agent.infrastructure.llm_client import LLMClient
-    from todo_agent.infrastructure.logger import Logger
-    from todo_agent.infrastructure.token_counter import get_token_counter
-except ImportError:
-    from infrastructure.config import Config  # type: ignore[no-redef]
-    from infrastructure.llm_client import LLMClient  # type: ignore[no-redef]
-    from infrastructure.logger import Logger  # type: ignore[no-redef]
-    from infrastructure.token_counter import get_token_counter  # type: ignore[no-redef]
+from todo_agent.infrastructure.llm_client import LLMClient
 
 
 class OpenRouterClient(LLMClient):
     """LLM API communication and response handling."""
 
-    def __init__(self, config: Config):
-        self.config = config
-        self.api_key = config.openrouter_api_key
-        self.model = config.model
-        self.base_url = "https://openrouter.ai/api/v1"
-        self.logger = Logger("openrouter_client")
-        self.token_counter = get_token_counter(self.model)
-
-    def _estimate_tokens(self, text: str) -> int:
+    def __init__(self, config):
         """
-        Estimate token count for text using accurate tokenization.
+        Initialize OpenRouter client.
 
         Args:
-            text: Text to count tokens for
-
-        Returns:
-            Number of tokens
+            config: Configuration object
         """
-        return self.token_counter.count_tokens(text)
+        super().__init__(config, config.model, "openrouter_client")
+        self.api_key = config.openrouter_api_key
+        self.base_url = "https://openrouter.ai/api/v1"
 
-    def _log_request_details(self, payload: Dict[str, Any], start_time: float) -> None:
-        """Log request details including accurate token count."""
-        # Count tokens for messages
-        messages = payload.get("messages", [])
-        tools = payload.get("tools", [])
+    def _get_request_headers(self) -> Dict[str, str]:
+        """Get request headers for OpenRouter API."""
+        return {
+            "Authorization": f"Bearer {self.api_key}",
+            "Content-Type": "application/json",
+        }
 
-        total_tokens = self.token_counter.count_request_tokens(messages, tools)
+    def _get_request_payload(self, messages: List[Dict[str, str]], tools: List[Dict[str, Any]]) -> Dict[str, Any]:
+        """Get request payload for OpenRouter API."""
+        return {
+            "model": self.model,
+            "messages": messages,
+            "tools": tools,
+            "tool_choice": "auto",
+        }
 
-        self.logger.info(f"Request sent - Token count: {total_tokens}")
-        # self.logger.debug(f"Raw request payload: {json.dumps(payload, indent=2)}")
+    def _get_api_endpoint(self) -> str:
+        """Get OpenRouter API endpoint."""
+        return f"{self.base_url}/chat/completions"
 
-    def _log_response_details(
-        self, response: Dict[str, Any], start_time: float
-    ) -> None:
-        """Log response details including token count and latency."""
+    def _process_response(self, response_data: Dict[str, Any], start_time: float) -> None:
+        """Process and log OpenRouter response details."""
+        import time
+
         end_time = time.time()
         latency_ms = (end_time - start_time) * 1000
 
         # Extract token usage from response if available
-        usage = response.get("usage", {})
+        usage = response_data.get("usage", {})
         prompt_tokens = usage.get("prompt_tokens", "unknown")
         completion_tokens = usage.get("completion_tokens", "unknown")
         total_tokens = usage.get("total_tokens", "unknown")
@@ -73,7 +60,7 @@ class OpenRouterClient(LLMClient):
         )
 
         # Extract and log choice details
-        choices = response.get("choices", [])
+        choices = response_data.get("choices", [])
         if not choices:
             return
 
@@ -99,7 +86,7 @@ class OpenRouterClient(LLMClient):
             tool_name = tool_call.get("function", {}).get("name", "unknown")
             self.logger.info(f"  Tool call {i}: {tool_name}")
 
-        self.logger.debug(f"Raw response: {json.dumps(response, indent=2)}")
+        self.logger.debug(f"Raw response: {response_data}")
 
     def chat_with_tools(
         self, messages: List[Dict[str, str]], tools: List[Dict[str, Any]]
@@ -114,33 +101,7 @@ class OpenRouterClient(LLMClient):
         Returns:
             API response dictionary
         """
-        headers = {
-            "Authorization": f"Bearer {self.api_key}",
-            "Content-Type": "application/json",
-        }
-
-        payload = {
-            "model": self.model,
-            "messages": messages,
-            "tools": tools,
-            "tool_choice": "auto",
-        }
-
-        start_time = time.time()
-        self._log_request_details(payload, start_time)
-
-        response = requests.post(  # nosec B113
-            f"{self.base_url}/chat/completions", headers=headers, json=payload
-        )
-
-        if response.status_code != 200:
-            self.logger.error(f"OpenRouter API error: {response.text}")
-            raise Exception(f"OpenRouter API error: {response.text}")
-
-        response_data: Dict[str, Any] = response.json()
-        self._log_response_details(response_data, start_time)
-
-        return response_data
+        return self._make_http_request(messages, tools)
 
     def continue_with_tool_result(self, tool_result: Dict[str, Any]) -> Dict[str, Any]:
         """
@@ -157,13 +118,24 @@ class OpenRouterClient(LLMClient):
 
     def extract_tool_calls(self, response: Dict[str, Any]) -> List[Dict[str, Any]]:
         """Extract tool calls from API response."""
+        # Check for provider errors first
+        if response.get("error", False):
+            self.logger.warning(f"Cannot extract tool calls from error response: {response.get('error_type')}")
+            return []
+
         tool_calls = []
         if response.get("choices"):
             choice = response["choices"][0]
             if "message" in choice and "tool_calls" in choice["message"]:
-                tool_calls = choice["message"]["tool_calls"]
+                raw_tool_calls = choice["message"]["tool_calls"]
+
+                # Validate each tool call using common validation
+                for i, tool_call in enumerate(raw_tool_calls):
+                    if self._validate_tool_call(tool_call, i):
+                        tool_calls.append(tool_call)
+
                 self.logger.debug(
-                    f"Extracted {len(tool_calls)} tool calls from response"
+                    f"Extracted {len(tool_calls)} valid tool calls from {len(raw_tool_calls)} total"
                 )
                 for i, tool_call in enumerate(tool_calls):
                     tool_name = tool_call.get("function", {}).get("name", "unknown")
@@ -179,6 +151,11 @@ class OpenRouterClient(LLMClient):
 
     def extract_content(self, response: Dict[str, Any]) -> str:
         """Extract content from API response."""
+        # Check for provider errors first
+        if response.get("error", False):
+            self.logger.warning(f"Cannot extract content from error response: {response.get('error_type')}")
+            return ""
+
         if response.get("choices"):
             choice = response["choices"][0]
             if "message" in choice and "content" in choice["message"]:
@@ -194,3 +171,23 @@ class OpenRouterClient(LLMClient):
             Model name string
         """
         return self.model
+
+    def get_provider_name(self) -> str:
+        """
+        Get the provider name for this client.
+
+        Returns:
+            Provider name string
+        """
+        return "openrouter"
+
+    def get_request_timeout(self) -> int:
+        """
+        Get the request timeout in seconds for OpenRouter.
+
+        Cloud APIs typically respond quickly, so we use a 30-second timeout.
+
+        Returns:
+            Timeout value in seconds (30)
+        """
+        return 30
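
Both clients now inherit `classify_error`, whose substring rules (shown in the llm_client.py hunk above) map raw provider errors onto message-lookup keys. A few illustrative cases, assuming a constructed client; the error strings are made up for the example:

    client = OpenRouterClient(config)  # config assumed to carry openrouter_api_key and model

    client.classify_error(Exception("HTTP 429: too many requests"), "openrouter")  # -> "rate_limit"
    client.classify_error(Exception("401 unauthorized"), "openrouter")             # -> "auth_error"
    client.classify_error(Exception("connection refused"), "openrouter")           # -> "timeout"
    client.classify_error(Exception("unexpected failure"), "openrouter")           # -> "general_error"

Note that connection-level failures deliberately collapse into "timeout" so users see one consistent message for unreachable providers.
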