sqlsaber 0.7.0__py3-none-any.whl → 0.8.0__py3-none-any.whl

This diff shows the contents of publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.

Note: this release of sqlsaber has been marked as potentially problematic.

@@ -0,0 +1,285 @@
+ """Anthropic API client implementation."""
+
+ import asyncio
+ import json
+ import logging
+ from typing import Any, AsyncIterator
+
+ import httpx
+
+ from .base import BaseLLMClient
+ from .exceptions import LLMClientError, create_exception_from_response
+ from .models import CreateMessageRequest
+ from .streaming import AnthropicStreamAdapter, StreamingResponse
+
+ logger = logging.getLogger(__name__)
+
+
+ class AnthropicClient(BaseLLMClient):
+     """Client for Anthropic's Claude API."""
+
+     def __init__(
+         self,
+         api_key: str | None = None,
+         oauth_token: str | None = None,
+         base_url: str | None = None,
+     ):
+         """Initialize the Anthropic client.
+
+         Args:
+             api_key: Anthropic API key
+             base_url: Base URL for the API (defaults to Anthropic's API)
+         """
+         super().__init__(api_key or "", base_url)
+
+         if not api_key and not oauth_token:
+             raise ValueError("Either api_key or oauth_token must be provided")
+
+         self.oauth_token = oauth_token
+         self.use_oauth = oauth_token is not None
+         self.base_url = base_url or "https://api.anthropic.com"
+         self.client: httpx.AsyncClient | None = None
+
+     def _get_client(self) -> httpx.AsyncClient:
+         """Get or create the HTTP client."""
+         if self.client is None or self.client.is_closed:
+             # Configure timeouts and connection limits for reliability
+             timeout = httpx.Timeout(
+                 connect=10.0,  # Connection timeout
+                 read=60.0,  # Read timeout for streaming
+                 write=10.0,  # Write timeout
+                 pool=10.0,  # Pool timeout
+             )
+             limits = httpx.Limits(
+                 max_keepalive_connections=20, max_connections=100, keepalive_expiry=30.0
+             )
+             self.client = httpx.AsyncClient(
+                 timeout=timeout, limits=limits, follow_redirects=True
+             )
+         return self.client
+
+     def _get_headers(self) -> dict[str, str]:
+         """Get the standard headers for API requests."""
+         if self.use_oauth:
+             # OAuth headers for Claude Pro authentication (matching Claude Code CLI)
+             return {
+                 "Authorization": f"Bearer {self.oauth_token}",
+                 "Content-Type": "application/json",
+                 "anthropic-version": "2023-06-01",
+                 "anthropic-beta": "oauth-2025-04-20",
+                 "User-Agent": "ClaudeCode/1.0 (Anthropic Claude Code CLI)",
+                 "Accept": "application/json",
+                 "X-Client-Name": "claude-code",
+                 "X-Client-Version": "1.0.0",
+             }
+         else:
+             # API key headers for standard authentication
+             return {
+                 "x-api-key": self.api_key,
+                 "anthropic-version": "2023-06-01",
+                 "content-type": "application/json",
+             }
+
+     async def create_message_with_tools(
+         self,
+         request: CreateMessageRequest,
+         cancellation_token: asyncio.Event | None = None,
+     ) -> AsyncIterator[Any]:
+         """Create a message with tool support and stream the response.
+
+         This method handles the full message creation flow including tool use,
+         similar to what the current AnthropicSQLAgent expects.
+
+         Args:
+             request: The message creation request
+             cancellation_token: Optional event to signal cancellation
+
+         Yields:
+             Stream events and final StreamingResponse
+         """
+         request.stream = True
+
+         client = self._get_client()
+         url = f"{self.base_url}/v1/messages"
+         headers = self._get_headers()
+         data = request.to_dict()
+
+         try:
+             async with client.stream(
+                 "POST", url, headers=headers, json=data
+             ) as response:
+                 request_id = response.headers.get("request-id")
+
+                 if response.status_code != 200:
+                     response_content = await response.aread()
+                     response_data = json.loads(response_content.decode())
+                     raise create_exception_from_response(
+                         response.status_code, response_data, request_id
+                     )
+
+                 # Use stream adapter to convert raw events and track state
+                 adapter = AnthropicStreamAdapter()
+                 raw_stream = self._process_sse_stream(response, cancellation_token)
+
+                 async for event in adapter.process_stream(
+                     raw_stream, cancellation_token
+                 ):
+                     yield event
+
+                 # Create final response object with proper state
+                 response_obj = StreamingResponse(
+                     content=adapter.get_content_blocks(),
+                     stop_reason=adapter.get_stop_reason(),
+                 )
+
+                 # Yield special event with response
+                 yield {"type": "response_ready", "data": response_obj}
+
+         except asyncio.CancelledError:
+             # Handle cancellation gracefully
+             logger.debug("Stream cancelled")
+             return
+         except Exception as e:
+             if not isinstance(e, LLMClientError):
+                 raise LLMClientError(f"Stream processing error: {str(e)}")
+             raise
+
+     def _handle_ping_event(self, event_data: str) -> dict[str, Any]:
+         """Handle ping event data.
+
+         Args:
+             event_data: Raw event data string
+
+         Returns:
+             Parsed ping event
+         """
+         try:
+             return {"type": "ping", "data": json.loads(event_data)}
+         except json.JSONDecodeError:
+             return {"type": "ping", "data": {}}
+
+     def _handle_error_event(self, event_data: str) -> None:
+         """Handle error event data.
+
+         Args:
+             event_data: Raw event data string
+
+         Raises:
+             LLMClientError: Always raises with error details
+         """
+         try:
+             error_data = json.loads(event_data)
+             raise LLMClientError(
+                 error_data.get("message", "Stream error"),
+                 error_data.get("type", "stream_error"),
+             )
+         except json.JSONDecodeError:
+             raise LLMClientError("Stream error with invalid JSON")
+
+     def _parse_event_data(
+         self, event_type: str | None, event_data: str
+     ) -> dict[str, Any] | None:
+         """Parse event data based on event type.
+
+         Args:
+             event_type: Type of the event
+             event_data: Raw event data string
+
+         Returns:
+             Parsed event or None if parsing failed
+         """
+         try:
+             parsed_data = json.loads(event_data)
+             return {"type": event_type, "data": parsed_data}
+         except json.JSONDecodeError as e:
+             logger.warning(f"Failed to parse stream data for event {event_type}: {e}")
+             return None
+
+     def _process_sse_line(
+         self, line: str, event_type: str | None
+     ) -> tuple[str | None, dict[str, Any] | None]:
+         """Process a single SSE line.
+
+         Args:
+             line: Line to process
+             event_type: Current event type
+
+         Returns:
+             Tuple of (new_event_type, event_to_yield)
+         """
+         if line.startswith("event: "):
+             return line[7:], None
+         elif line.startswith("data: "):
+             event_data = line[6:]
+
+             if event_type == "ping":
+                 return event_type, self._handle_ping_event(event_data)
+             elif event_type == "error":
+                 self._handle_error_event(event_data)
+                 return event_type, None  # Never reached due to exception
+             else:
+                 parsed_event = self._parse_event_data(event_type, event_data)
+                 return event_type, parsed_event
+
+         return event_type, None
+
+     async def _process_sse_stream(
+         self,
+         response: httpx.Response,
+         cancellation_token: asyncio.Event | None = None,
+     ) -> AsyncIterator[dict[str, Any]]:
+         """Process server-sent events from the response stream.
+
+         Args:
+             response: The HTTP response object
+             cancellation_token: Optional event to signal cancellation
+
+         Yields:
+             Parsed stream events
+
+         Raises:
+             LLMClientError: If stream processing fails
+         """
+         buffer = ""
+         event_type = None
+
+         try:
+             async for chunk in response.aiter_bytes():
+                 if cancellation_token is not None and cancellation_token.is_set():
+                     return
+
+                 try:
+                     buffer += chunk.decode("utf-8")
+                 except UnicodeDecodeError as e:
+                     logger.warning(f"Failed to decode chunk: {e}")
+                     continue
+
+                 while "\n" in buffer:
+                     line, buffer = buffer.split("\n", 1)
+                     line = line.strip()
+
+                     if not line:
+                         continue
+
+                     event_type, event_to_yield = self._process_sse_line(
+                         line, event_type
+                     )
+                     if event_to_yield is not None:
+                         yield event_to_yield
+
+         except httpx.TimeoutException as e:
+             raise LLMClientError(f"Stream timeout error: {str(e)}")
+         except httpx.NetworkError as e:
+             raise LLMClientError(f"Network error during streaming: {str(e)}")
+         except httpx.HTTPError as e:
+             raise LLMClientError(f"HTTP error during streaming: {str(e)}")
+         except asyncio.TimeoutError:
+             raise LLMClientError("Stream timeout")
+         except Exception as e:
+             raise LLMClientError(f"Unexpected error during streaming: {str(e)}")
+
+     async def close(self):
+         """Close the HTTP client."""
+         if self.client and not self.client.is_closed:
+             await self.client.aclose()
+         self.client = None
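For orientation, a minimal usage sketch of the client added above follows. It is illustrative only: the import paths and the keyword arguments of CreateMessageRequest (defined in the .models module, which is not part of this diff) are assumptions, and the model name is a placeholder. Only the "response_ready" event and the StreamingResponse fields come from the hunk above.

import asyncio

from sqlsaber.clients.anthropic import AnthropicClient  # import path assumed
from sqlsaber.clients.models import CreateMessageRequest  # import path assumed


async def main() -> None:
    # BaseLLMClient provides __aenter__/__aexit__, so close() runs on exit.
    async with AnthropicClient(api_key="sk-ant-...") as client:
        request = CreateMessageRequest(  # field names below are assumptions
            model="claude-3-5-sonnet-latest",  # placeholder model name
            messages=[{"role": "user", "content": "List the tables"}],
            max_tokens=1024,
        )
        async for event in client.create_message_with_tools(request):
            # The client yields a dict event of type "response_ready" at the
            # end of the stream; intermediate adapter events pass through as-is.
            if isinstance(event, dict) and event.get("type") == "response_ready":
                response = event["data"]  # StreamingResponse
                print(response.stop_reason)  # attribute set in the hunk above


asyncio.run(main())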
@@ -0,0 +1,31 @@
+ """Abstract base class for LLM clients."""
+
+ from abc import ABC
+
+
+ class BaseLLMClient(ABC):
+     """Abstract base class for LLM API clients."""
+
+     def __init__(self, api_key: str, base_url: str | None = None):
+         """Initialize the client with API key and optional base URL.
+
+         Args:
+             api_key: API key for authentication
+             base_url: Base URL for the API (optional, uses default if not provided)
+         """
+         self.api_key = api_key
+         self.base_url = base_url
+
+     async def close(self):
+         """Close the client and clean up resources."""
+         # Default implementation does nothing
+         # Subclasses can override to clean up HTTP sessions, etc.
+         pass
+
+     async def __aenter__(self):
+         """Async context manager entry."""
+         return self
+
+     async def __aexit__(self, exc_type, exc_val, exc_tb):
+         """Async context manager exit."""
+         await self.close()
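The base class above is deliberately thin: it stores the API key and base URL and supplies async context-manager plumbing so that a subclass's close() runs on exit. A minimal sketch, assuming the module is importable as shown (DummyClient is purely illustrative and not part of the package):

import asyncio

from sqlsaber.clients.base import BaseLLMClient  # import path assumed


class DummyClient(BaseLLMClient):
    """Illustrative subclass; not part of the package."""

    async def close(self) -> None:
        # Subclasses override close() to release HTTP sessions, sockets, etc.
        print("cleaning up")


async def main() -> None:
    # __aexit__ awaits close(), so cleanup runs even if the body raises.
    async with DummyClient(api_key="not-used") as client:
        print(client.base_url)  # None unless a base_url was passed


asyncio.run(main())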
@@ -0,0 +1,117 @@
+ """Exception classes for LLM client errors."""
+
+ from typing import Any
+
+
+ class LLMClientError(Exception):
+     """Base exception for LLM client errors."""
+
+     def __init__(
+         self,
+         message: str,
+         error_type: str | None = None,
+         status_code: int | None = None,
+         request_id: str | None = None,
+     ):
+         super().__init__(message)
+         self.error_type = error_type
+         self.status_code = status_code
+         self.request_id = request_id
+
+
+ class AuthenticationError(LLMClientError):
+     """Authentication failed - invalid API key."""
+
+     def __init__(self, message: str = "Invalid API key", **kwargs):
+         super().__init__(message, "authentication_error", **kwargs)
+
+
+ class PermissionError(LLMClientError):
+     """Permission denied for the requested resource."""
+
+     def __init__(self, message: str = "Permission denied", **kwargs):
+         super().__init__(message, "permission_error", **kwargs)
+
+
+ class NotFoundError(LLMClientError):
+     """Requested resource not found."""
+
+     def __init__(self, message: str = "Resource not found", **kwargs):
+         super().__init__(message, "not_found_error", **kwargs)
+
+
+ class InvalidRequestError(LLMClientError):
+     """Invalid request format or content."""
+
+     def __init__(self, message: str = "Invalid request", **kwargs):
+         super().__init__(message, "invalid_request_error", **kwargs)
+
+
+ class RequestTooLargeError(LLMClientError):
+     """Request exceeds maximum allowed size."""
+
+     def __init__(self, message: str = "Request too large", **kwargs):
+         super().__init__(message, "request_too_large", **kwargs)
+
+
+ class RateLimitError(LLMClientError):
+     """Rate limit exceeded."""
+
+     def __init__(self, message: str = "Rate limit exceeded", **kwargs):
+         super().__init__(message, "rate_limit_error", **kwargs)
+
+
+ class APIError(LLMClientError):
+     """Internal API error."""
+
+     def __init__(self, message: str = "Internal API error", **kwargs):
+         super().__init__(message, "api_error", **kwargs)
+
+
+ class OverloadedError(LLMClientError):
+     """API is temporarily overloaded."""
+
+     def __init__(self, message: str = "API temporarily overloaded", **kwargs):
+         super().__init__(message, "overloaded_error", **kwargs)
+
+
+ # Mapping of HTTP status codes to exception classes
+ STATUS_CODE_TO_EXCEPTION = {
+     400: InvalidRequestError,
+     401: AuthenticationError,
+     403: PermissionError,
+     404: NotFoundError,
+     413: RequestTooLargeError,
+     429: RateLimitError,
+     500: APIError,
+     529: OverloadedError,
+ }
+
+
+ def create_exception_from_response(
+     status_code: int,
+     response_data: dict[str, Any],
+     request_id: str | None = None,
+ ) -> LLMClientError:
+     """Create appropriate exception from HTTP response."""
+     error_data = response_data.get("error", {})
+     message = error_data.get("message", f"HTTP {status_code} error")
+     error_type = error_data.get("type")
+
+     exception_class = STATUS_CODE_TO_EXCEPTION.get(status_code, LLMClientError)
+
+     # Handle base vs subclass constructors
+     if exception_class == LLMClientError:
+         return exception_class(
+             message,
+             error_type,
+             status_code,
+             request_id,
+         )
+     else:
+         # Subclasses only take message and **kwargs
+         return exception_class(
+             message,
+             status_code=status_code,
+             request_id=request_id,
+         )
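To illustrate the mapping above: create_exception_from_response selects the exception class by HTTP status code and falls back to the base LLMClientError for unmapped codes, preserving the error type reported in the response body. A small sketch, assuming the import path shown:

from sqlsaber.clients.exceptions import (  # import path assumed
    LLMClientError,
    RateLimitError,
    create_exception_from_response,
)

body = {"error": {"type": "rate_limit_error", "message": "Too many requests"}}
exc = create_exception_from_response(429, body, request_id="req_123")
assert isinstance(exc, RateLimitError)
assert exc.status_code == 429 and exc.request_id == "req_123"

# Unmapped status codes (e.g. 502) fall back to the base class and keep the
# error type from the response body.
fallback = create_exception_from_response(
    502, {"error": {"type": "bad_gateway", "message": "upstream error"}}
)
assert type(fallback) is LLMClientError
assert fallback.error_type == "bad_gateway"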